mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-15 01:24:33 +00:00)
Merge branch 'vfs-6.14.poll' into vfs.fixes
Bring in the fixes for __pollwait() and waitqueue_active() interactions.

Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in: commit 1623bc27a8
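For context, the race these fixes close is the classic lockless waitqueue_active() pattern: the waker checks "is anyone on the wait queue?" without taking the queue lock, while the poller adds itself via __pollwait() and only then tests the condition. A minimal sketch of the two sides, assuming a hypothetical character device (my_dev, my_dev_signal and my_dev_poll are illustrative names, not code from this merge):

#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t wq;
	bool data_ready;
};

/* Event source: lockless "is anyone waiting?" check before waking. */
static void my_dev_signal(struct my_dev *dev)
{
	WRITE_ONCE(dev->data_ready, true);
	/* wq_has_sleeper() issues the barrier that pairs with poll_wait() */
	if (wq_has_sleeper(&dev->wq))
		wake_up_interruptible(&dev->wq);
}

/* ->poll: __pollwait() queues us on dev->wq, then we test the condition.
 * Without a full barrier between the queueing and this test, the test can
 * be reordered before __add_wait_queue(); the waker may then see an empty
 * queue while we see a stale data_ready, and the wakeup is lost.
 */
static __poll_t my_dev_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;

	poll_wait(file, &dev->wq, wait);
	if (READ_ONCE(dev->data_ready))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}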
include/linux/poll.h
@@ -31,8 +31,8 @@ struct poll_table_struct;
 typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
 
 /*
- * Do not touch the structure directly, use the access functions
- * poll_does_not_wait() and poll_requested_events() instead.
+ * Do not touch the structure directly, use the access function
+ * poll_requested_events() instead.
  */
 typedef struct poll_table_struct {
 	poll_queue_proc _qproc;
@@ -41,18 +41,16 @@ typedef struct poll_table_struct {
 
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
 {
-	if (p && p->_qproc && wait_address)
+	if (p && p->_qproc) {
 		p->_qproc(filp, wait_address, p);
-}
-
-/*
- * Return true if it is guaranteed that poll will not wait. This is the case
- * if the poll() of another file descriptor in the set got an event, so there
- * is no need for waiting.
- */
-static inline bool poll_does_not_wait(const poll_table *p)
-{
-	return p == NULL || p->_qproc == NULL;
+		/*
+		 * This memory barrier is paired in the wq_has_sleeper().
+		 * See the comment above prepare_to_wait(), we need to
+		 * ensure that subsequent tests in this thread can't be
+		 * reordered with __add_wait_queue() in _qproc() paths.
+		 */
+		smp_mb();
+	}
 }
 
 /*
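With this change poll_wait() itself orders the waitqueue insertion (done by ->_qproc(), e.g. __pollwait()) against whatever condition the ->poll() method tests afterwards, pairing with the barrier that wq_has_sleeper() issues before waitqueue_active() on the wakeup side. A rough ordering diagram, not literal kernel source:

	/*
	 *  poll side (poll_wait(), after this merge)     wake side (wq_has_sleeper() caller)
	 *  -----------------------------------------     -----------------------------------
	 *  __add_wait_queue(wq, entry);  // via _qproc   condition = true;
	 *  smp_mb();                                     smp_mb();  // inside wq_has_sleeper()
	 *  test condition;                               if (waitqueue_active(wq))
	 *                                                        wake_up(wq);
	 *
	 *  With both barriers present, either the poller observes the new condition
	 *  or the waker observes a non-empty queue and issues the wakeup; the window
	 *  in which both sides miss each other is closed.
	 */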
include/net/sock.h
@@ -2297,7 +2297,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 }
 
 /**
- * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * sock_poll_wait - wrapper for the poll_wait call.
  * @filp:           file
  * @sock:           socket to wait on
  * @p:              poll_table
@@ -2307,15 +2307,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 				  poll_table *p)
 {
-	if (!poll_does_not_wait(p)) {
-		poll_wait(filp, &sock->wq.wait, p);
-		/* We need to be sure we are in sync with the
-		 * socket flags modification.
-		 *
-		 * This memory barrier is paired in the wq_has_sleeper.
-		 */
-		smp_mb();
-	}
+	/* Provides a barrier we need to be sure we are in sync
+	 * with the socket flags modification.
+	 *
+	 * This memory barrier is paired in the wq_has_sleeper.
+	 */
+	poll_wait(filp, &sock->wq.wait, p);
 }
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
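On the socket side, sock_poll_wait() now simply forwards to poll_wait(): the open-coded smp_mb() is redundant because poll_wait() provides it, and the poll_does_not_wait() guard is unnecessary because poll_wait() already does nothing when ->_qproc is NULL. A simplified sketch of how a protocol ->poll relies on that ordering, loosely modeled on the datagram_poll() pattern (example_sock_poll is an illustrative name, not kernel code):

#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static __poll_t example_sock_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	/* Queues on the socket wait queue; the barrier in poll_wait() keeps
	 * the state tests below from being reordered before the queueing.
	 */
	sock_poll_wait(file, sock, wait);

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	return mask;
}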
io_uring/io_uring.c
@@ -2813,13 +2813,12 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 
 	if (unlikely(!ctx->poll_activated))
 		io_activate_pollwq(ctx);
-
-	poll_wait(file, &ctx->poll_wq, wait);
 	/*
-	 * synchronizes with barrier from wq_has_sleeper call in
-	 * io_commit_cqring
+	 * provides mb() which pairs with barrier from wq_has_sleeper
+	 * call in io_commit_cqring
 	 */
-	smp_rmb();
+	poll_wait(file, &ctx->poll_wq, wait);
+
 	if (!io_sqring_full(ctx))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 