mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-16 10:17:32 +00:00
b542e383d8
The recursion protection for eventfd_signal() is based on a per CPU variable and relies on the !RT semantics of spin_lock_irqsave() for protecting this per CPU variable. On RT kernels spin_lock_irqsave() neither disables preemption nor interrupts, which allows the spin lock held section to be preempted. If the preempting task invokes eventfd_signal() as well, then the recursion warning triggers.

Paolo suggested protecting the per CPU variable with a local lock, but that's heavyweight and actually not necessary. The goal of this protection is to prevent the task stack from overflowing, which can be achieved with a per task recursion protection as well.

Replace the per CPU variable with a per task bit similar to other recursion protection bits like task_struct::in_page_owner. This works on both !RT and RT kernels and, as a side effect, removes the extra per CPU storage.

No functional change for !RT kernels.

Reported-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/87wnp9idso.ffs@tglx
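For context, a minimal sketch of how the fs/eventfd.c side can use the new task_struct::in_eventfd_signal bit described above. The fields ctx->wqh and ctx->count and the exact wakeup/locking details are not shown in this header and are assumed here; this is an illustration of the per-task recursion guard, not a reproduction of the exact patch.

/*
 * Sketch only (assumed eventfd_ctx layout): set the per task bit while
 * holding the waitqueue lock, so a wakeup handler that recurses into
 * eventfd_signal() on the same task trips the warning instead of
 * overflowing the task stack.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/* Recursion from a wakeup handler on this task: refuse to signal. */
	if (WARN_ON_ONCE(current->in_eventfd_signal))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	current->in_eventfd_signal = 1;
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	current->in_eventfd_signal = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}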
94 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/eventfd.h
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H

#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from eventfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC O_CLOEXEC
#define EFD_NONBLOCK O_NONBLOCK

#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)

struct eventfd_ctx;
struct file;

#ifdef CONFIG_EVENTFD

void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

static inline bool eventfd_signal_allowed(void)
{
	return !current->in_eventfd_signal;
}

#else /* CONFIG_EVENTFD */

/*
 * Ugly ugly ugly error layer to support modules that uses eventfd but
 * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
 */

static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	return ERR_PTR(-ENOSYS);
}

static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
	return -ENOSYS;
}

static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
{

}

static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
						wait_queue_entry_t *wait, __u64 *cnt)
{
	return -ENOSYS;
}

static inline bool eventfd_signal_allowed(void)
{
	return true;
}

static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{

}

#endif

#endif /* _LINUX_EVENTFD_H */
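As a usage illustration of the API declared above, here is a hypothetical in-kernel caller; the names my_notify_setup(), my_notify_consumer() and my_notify_teardown() are invented for this sketch and do not exist in the tree. It resolves a userspace-supplied eventfd file descriptor once, then signals it only when eventfd_signal_allowed() reports that the recursion protection would not trigger.

/* Hypothetical caller of the eventfd API; illustrative names only. */
#include <linux/eventfd.h>
#include <linux/err.h>

static struct eventfd_ctx *my_ctx;

/* Resolve the eventfd file descriptor once, at setup time. */
int my_notify_setup(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	my_ctx = ctx;
	return 0;
}

/* Signal the consumer, unless this task is already inside eventfd_signal(). */
void my_notify_consumer(void)
{
	if (my_ctx && eventfd_signal_allowed())
		eventfd_signal(my_ctx, 1);
}

/* Drop the context reference taken in my_notify_setup(). */
void my_notify_teardown(void)
{
	if (my_ctx) {
		eventfd_ctx_put(my_ctx);
		my_ctx = NULL;
	}
}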