Merge branch 'vfs-6.14.poll' into vfs.fixes

Bring in the fixes for __pollwait() and waitqueue_active() interactions.

Signed-off-by: Christian Brauner <brauner@kernel.org>
Christian Brauner 2025-01-10 12:01:21 +01:00
commit 1623bc27a8
3 changed files with 23 additions and 29 deletions
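The underlying race is the usual sleeper/waker handshake: an f_op->poll implementation queues the caller via __pollwait() (or another _qproc) and then tests its readiness condition, while the wakeup side publishes the condition and then uses waitqueue_active()/wq_has_sleeper() to decide whether a wake_up is needed at all. Without a full barrier on the polling side, the condition test can be reordered before the waitqueue insertion and the two sides can miss each other. A rough sketch of the wakeup side, with hypothetical names (hypothetical_dev, data_ready) that are not part of this merge:

#include <linux/poll.h>
#include <linux/wait.h>

/* Illustrative device; only the waitqueue and the condition matter. */
struct hypothetical_dev {
        wait_queue_head_t wq;
        bool data_ready;
};

static void hypothetical_dev_data_arrived(struct hypothetical_dev *dev)
{
        WRITE_ONCE(dev->data_ready, true);      /* publish the condition */

        /*
         * wq_has_sleeper() executes smp_mb() before the lockless
         * waitqueue_active() check; that barrier pairs with the one
         * poll_wait() now provides on the sleeping side.
         */
        if (wq_has_sleeper(&dev->wq))
                wake_up_interruptible_poll(&dev->wq, EPOLLIN | EPOLLRDNORM);
}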

diff --git a/include/linux/poll.h b/include/linux/poll.h
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -25,14 +25,14 @@
 struct poll_table_struct;
 
 /*
  * structures and helpers for f_op->poll implementations
  */
 typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
 
 /*
- * Do not touch the structure directly, use the access functions
- * poll_does_not_wait() and poll_requested_events() instead.
+ * Do not touch the structure directly, use the access function
+ * poll_requested_events() instead.
  */
 typedef struct poll_table_struct {
         poll_queue_proc _qproc;
@@ -41,18 +41,16 @@ typedef struct poll_table_struct {
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
 {
-        if (p && p->_qproc && wait_address)
+        if (p && p->_qproc) {
                 p->_qproc(filp, wait_address, p);
-}
-
-/*
- * Return true if it is guaranteed that poll will not wait. This is the case
- * if the poll() of another file descriptor in the set got an event, so there
- * is no need for waiting.
- */
-static inline bool poll_does_not_wait(const poll_table *p)
-{
-        return p == NULL || p->_qproc == NULL;
+                /*
+                 * This memory barrier is paired in the wq_has_sleeper().
+                 * See the comment above prepare_to_wait(), we need to
+                 * ensure that subsequent tests in this thread can't be
+                 * reordered with __add_wait_queue() in _qproc() paths.
+                 */
+                smp_mb();
+        }
 }
 
 /*
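With the smp_mb() folded into poll_wait() itself, a ->poll() method no longer needs its own barrier between queueing and testing readiness. A minimal consumer-side sketch for the same hypothetical device as above:

static __poll_t hypothetical_dev_poll(struct file *file, poll_table *wait)
{
        struct hypothetical_dev *dev = file->private_data;
        __poll_t mask = 0;

        /*
         * Adds this thread to dev->wq through _qproc and then executes
         * smp_mb(), so the data_ready test below cannot be reordered
         * before the waitqueue insertion.
         */
        poll_wait(file, &dev->wq, wait);

        if (READ_ONCE(dev->data_ready))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}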

diff --git a/include/net/sock.h b/include/net/sock.h
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2297,7 +2297,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 }
 
 /**
- * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * sock_poll_wait - wrapper for the poll_wait call.
  * @filp: file
  * @sock: socket to wait on
  * @p: poll_table
@@ -2307,15 +2307,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
                                   poll_table *p)
 {
-        if (!poll_does_not_wait(p)) {
-                poll_wait(filp, &sock->wq.wait, p);
-                /* We need to be sure we are in sync with the
-                 * socket flags modification.
-                 *
-                 * This memory barrier is paired in the wq_has_sleeper.
-                 */
-                smp_mb();
-        }
+        /* Provides a barrier we need to be sure we are in sync
+         * with the socket flags modification.
+         *
+         * This memory barrier is paired in the wq_has_sleeper.
+         */
+        poll_wait(filp, &sock->wq.wait, p);
 }
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
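Both the old and the new comments point at the same pairing partner: wq_has_sleeper() (and its socket variant skwq_has_sleeper()) on the wakeup side. As an approximation of that helper in include/linux/wait.h, not code touched by this merge, it boils down to:

static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
        /*
         * Order the writes made before this call against the lockless
         * waitqueue_active() read below; pairs with the barrier taken
         * on the waiting side, which now lives inside poll_wait().
         */
        smp_mb();
        return waitqueue_active(wq_head);
}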

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2813,13 +2813,12 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
         if (unlikely(!ctx->poll_activated))
                 io_activate_pollwq(ctx);
-        poll_wait(file, &ctx->poll_wq, wait);
         /*
-         * synchronizes with barrier from wq_has_sleeper call in
-         * io_commit_cqring
+         * provides mb() which pairs with barrier from wq_has_sleeper
+         * call in io_commit_cqring
          */
-        smp_rmb();
+        poll_wait(file, &ctx->poll_wq, wait);
         if (!io_sqring_full(ctx))
                 mask |= EPOLLOUT | EPOLLWRNORM;