Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-04 04:06:26 +00:00)
Merge branch 'for-6.2/io_uring' into for-6.2/io_uring-next
* for-6.2/io_uring: (41 commits)
  io_uring: keep unlock_post inlined in hot path
  io_uring: don't use complete_post in kbuf
  io_uring: spelling fix
  io_uring: remove io_req_complete_post_tw
  io_uring: allow multishot polled reqs to defer completion
  io_uring: remove overflow param from io_post_aux_cqe
  io_uring: add lockdep assertion in io_fill_cqe_aux
  io_uring: make io_fill_cqe_aux static
  io_uring: add io_aux_cqe which allows deferred completion
  io_uring: allow defer completion for aux posted cqes
  io_uring: defer all io_req_complete_failed
  io_uring: always lock in io_apoll_task_func
  io_uring: remove iopoll spinlock
  io_uring: iopoll protect complete_post
  io_uring: inline __io_req_complete_put()
  io_uring: remove io_req_tw_post_queue
  io_uring: use io_req_task_complete() in timeout
  io_uring: hold locks for io_req_complete_failed
  io_uring: add completion locking for iopoll
  io_uring: kill io_cqring_ev_posted() and __io_cq_unlock_post()
  ...
This commit is contained in: commit b2cf789f6c
 fs/eventfd.c | 37 changed lines
fs/eventfd.c:

@@ -43,21 +43,7 @@ struct eventfd_ctx {
 	int id;
 };
 
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- *          The value cannot be negative.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
 {
 	unsigned long flags;

@@ -78,12 +64,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
-		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
 
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * ... (kernel-doc block unchanged; moved here from above) ...
+ */
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+{
+	return eventfd_signal_mask(ctx, n, 0);
+}
 EXPORT_SYMBOL_GPL(eventfd_signal);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
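For context, a hedged sketch of how a kernel caller would use the new helper, assuming only what the hunks above show: eventfd_signal() keeps its old behaviour by forwarding a zero mask, while a caller that wants to tag the wakeup (as io_uring does later in this series with EPOLL_URING_WAKE) passes the extra poll bits explicitly. The function and its callers are illustrative, not part of the patch.

/* Illustrative only; mirrors the eventfd_signal()/eventfd_signal_mask() split above. */
static void example_notify(struct eventfd_ctx *ctx, bool from_io_uring)
{
	if (from_io_uring)
		/* tag the wakeup so poll handlers can detect recursion */
		eventfd_signal_mask(ctx, 1, EPOLL_URING_WAKE);
	else
		/* legacy path: eventfd_signal(ctx, 1) is now eventfd_signal_mask(ctx, 1, 0) */
		eventfd_signal(ctx, 1);
}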
fs/eventpoll.c:

@@ -491,7 +491,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
 	struct eventpoll *ep_src;
 	unsigned long flags;

@@ -522,16 +523,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
 	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
 	ep->nests = nests + 1;
-	wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
 	ep->nests = 0;
 	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
 }
 
 #else
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
-	wake_up_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
 }
 
 #endif

@@ -742,7 +744,7 @@ static void ep_free(struct eventpoll *ep)
 	/* We need to release all tasks waiting for these file */
 	if (waitqueue_active(&ep->poll_wait))
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);

@@ -1208,7 +1210,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, epi);
+		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);

@@ -1553,7 +1555,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 }

@@ -1629,7 +1631,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 }
include/linux/eventfd.h:

@@ -40,6 +40,7 @@ struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

@@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
 	return -ENOSYS;
 }
 
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+				      unsigned mask)
+{
+	return -ENOSYS;
+}
+
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
include/linux/io_uring_types.h:

@@ -174,7 +174,9 @@ struct io_submit_state {
 	bool			plug_started;
 	bool			need_plug;
 	unsigned short		submit_nr;
+	unsigned int		cqes_count;
 	struct blk_plug		plug;
+	struct io_uring_cqe	cqes[16];
 };
 
 struct io_ev_fd {
include/uapi/linux/eventpoll.h:

@@ -41,6 +41,12 @@
 #define EPOLLMSG	(__force __poll_t)0x00000400
 #define EPOLLRDHUP	(__force __poll_t)0x00002000
 
+/*
+ * Internal flag - wakeup generated by io_uring, used to detect recursion back
+ * into the io_uring poll handler.
+ */
+#define EPOLL_URING_WAKE	((__force __poll_t)(1U << 27))
+
 /* Set exclusive wakeup mode for the target file descriptor */
 #define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28))
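A minimal sketch of how such a flag is meant to be consumed, assuming only what this series shows (the io_uring/poll.c hunk further down does essentially this with EPOLLONESHOT): a waitqueue callback can inspect the poll key and notice that the wakeup originated from io_uring's own CQ posting. The callback name and its reaction are illustrative.

/* Illustrative waitqueue callback; detects an io_uring-generated wakeup via the key. */
static int example_poll_wake(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *key)
{
	__poll_t mask = key_to_poll(key);

	if (mask & EPOLL_URING_WAKE) {
		/* wakeup came from io_uring itself; e.g. drop multishot to avoid recursion */
		return 0;
	}
	/* ... normal wakeup handling ... */
	return 1;
}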
include/uapi/linux/io_uring.h:

@@ -296,10 +296,28 @@ enum io_uring_op {
  *
  * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
  *				the buf_index field.
+ *
+ * IORING_SEND_ZC_REPORT_USAGE
+ *				If set, SEND[MSG]_ZC should report
+ *				the zerocopy usage in cqe.res
+ *				for the IORING_CQE_F_NOTIF cqe.
+ *				0 is reported if zerocopy was actually possible.
+ *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
+ *				(at least partially).
  */
 #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
 #define IORING_RECV_MULTISHOT		(1U << 1)
 #define IORING_RECVSEND_FIXED_BUF	(1U << 2)
+#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
+
+/*
+ * cqe.res for IORING_CQE_F_NOTIF if
+ * IORING_SEND_ZC_REPORT_USAGE was requested
+ *
+ * It should be treated as a flag, all other
+ * bits of cqe.res should be treated as reserved!
+ */
+#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)
 
 /*
  * accept flags stored in sqe->ioprio
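From userspace the new flag would be used roughly as below. This is a hedged sketch, not part of the patch: it assumes a liburing build that already exposes io_uring_prep_send_zc() and headers carrying the new constants; sockfd, buf and len are caller-supplied placeholders.

/* Hedged userspace sketch of IORING_SEND_ZC_REPORT_USAGE / IORING_NOTIF_USAGE_ZC_COPIED. */
#include <liburing.h>
#include <stdio.h>

static void send_zc_with_usage_report(struct io_uring *ring, int sockfd,
				      const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	/* ask the kernel to report whether the data was really sent zero-copy */
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, IORING_SEND_ZC_REPORT_USAGE);
	io_uring_submit(ring);

	/* two completions arrive: the send result and the IORING_CQE_F_NOTIF one */
	while (io_uring_wait_cqe(ring, &cqe) == 0) {
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* cqe->res is a flag field when usage reporting was requested */
			if (cqe->res & IORING_NOTIF_USAGE_ZC_COPIED)
				fprintf(stderr, "zerocopy send fell back to copying\n");
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}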
io_uring/io_uring.c:

@@ -167,7 +167,8 @@ EXPORT_SYMBOL(io_uring_get_socket);
 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 {
-	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
+	    ctx->submit_state.cqes_count)
 		__io_submit_flush_completions(ctx);
 }

@@ -495,7 +496,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
-		eventfd_signal(ev_fd->cq_ev_fd, 1);
+		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);

@@ -531,7 +532,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 	if (likely(eventfd_signal_allowed())) {
-		eventfd_signal(ev_fd->cq_ev_fd, 1);
+		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
 	} else {
 		atomic_inc(&ev_fd->refs);
 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))

@@ -581,23 +582,21 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
-static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
-	io_commit_cqring_flush(ctx);
-	io_cqring_wake(ctx);
-}
-
-static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
+/* keep it inlined for io_submit_flush_completions() */
+static inline void io_cq_unlock_post_inline(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
 {
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
+
+	io_commit_cqring_flush(ctx);
+	io_cqring_wake(ctx);
 }
 
 void io_cq_unlock_post(struct io_ring_ctx *ctx)
+	__releases(ctx->completion_lock)
 {
-	__io_cq_unlock_post(ctx);
+	io_cq_unlock_post_inline(ctx);
 }

@@ -778,11 +777,13 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow)
+static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+			    bool allow_overflow)
 {
 	struct io_uring_cqe *cqe;
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	ctx->cq_extra++;

@@ -811,9 +812,23 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	return false;
 }
 
-bool io_post_aux_cqe(struct io_ring_ctx *ctx,
-		     u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow)
+static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_submit_state *state = &ctx->submit_state;
+	unsigned int i;
+
+	lockdep_assert_held(&ctx->uring_lock);
+	for (i = 0; i < state->cqes_count; i++) {
+		struct io_uring_cqe *cqe = &state->cqes[i];
+
+		io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags, true);
+	}
+	state->cqes_count = 0;
+}
+
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+			      bool allow_overflow)
 {
 	bool filled;

@@ -823,15 +838,58 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
 	return filled;
 }
 
-static void __io_req_complete_put(struct io_kiocb *req)
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
+	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
+}
+
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+		bool allow_overflow)
+{
+	struct io_uring_cqe *cqe;
+	unsigned int length;
+
+	if (!defer)
+		return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+
+	length = ARRAY_SIZE(ctx->submit_state.cqes);
+
+	lockdep_assert_held(&ctx->uring_lock);
+
+	if (ctx->submit_state.cqes_count == length) {
+		io_cq_lock(ctx);
+		__io_flush_post_cqes(ctx);
+		/* no need to flush - flush is deferred */
+		spin_unlock(&ctx->completion_lock);
+	}
+
+	/* For defered completions this is not as strict as it is otherwise,
+	 * however it's main job is to prevent unbounded posted completions,
+	 * and in that it works just as well.
+	 */
+	if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+		return false;
+
+	cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
+	cqe->user_data = user_data;
+	cqe->res = res;
+	cqe->flags = cflags;
+	return true;
+}
+
+static void __io_req_complete_post(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_cq_lock(ctx);
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		__io_fill_cqe_req(ctx, req);
+
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
 	 */
 	if (req_ref_put_and_test(req)) {
-		struct io_ring_ctx *ctx = req->ctx;
-
 		if (req->flags & IO_REQ_LINK_FLAGS) {
 			if (req->flags & IO_DISARM_MASK)
 				io_disarm_next(req);

@@ -852,38 +910,35 @@ static void __io_req_complete_put(struct io_kiocb *req)
 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 		ctx->locked_free_nr++;
 	}
-}
-
-void __io_req_complete_post(struct io_kiocb *req)
-{
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req->ctx, req);
-	__io_req_complete_put(req);
-}
-
-void io_req_complete_post(struct io_kiocb *req)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	io_cq_lock(ctx);
-	__io_req_complete_post(req);
 	io_cq_unlock_post(ctx);
 }
 
-inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
+void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
-	io_req_complete_post(req);
+	if (!(issue_flags & IO_URING_F_UNLOCKED) ||
+	    !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
+		__io_req_complete_post(req);
+	} else {
+		struct io_ring_ctx *ctx = req->ctx;
+
+		mutex_lock(&ctx->uring_lock);
+		__io_req_complete_post(req);
+		mutex_unlock(&ctx->uring_lock);
+	}
 }
 
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
+	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	req_set_fail(req);
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
-	io_req_complete_post(req);
+	io_req_complete_defer(req);
 }

@@ -1105,6 +1160,20 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+}
+
 static void io_req_local_work_add(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;

@@ -1127,11 +1196,10 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	__io_cqring_wake(ctx);
 }
 
-static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct llist_node *node;
 
 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		io_req_local_work_add(req);

@@ -1148,20 +1216,7 @@ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
-	node = llist_del_all(&tctx->task_list);
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
-}
-
-void io_req_task_work_add(struct io_kiocb *req)
-{
-	__io_req_task_work_add(req, true);
+	io_fallback_tw(tctx);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)

@@ -1237,23 +1292,10 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 	return ret;
 }
 
-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
-{
-	io_req_complete_post(req);
-}
-
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	io_req_set_res(req, res, cflags);
-	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req);
-}
-
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
-	/* not needed for normal modes, but SQPOLL depends on it */
 	io_tw_lock(req->ctx, locked);
-	io_req_complete_failed(req, req->cqe.res);
+	io_req_defer_failed(req, req->cqe.res);
 }

@@ -1263,7 +1305,7 @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
 	if (likely(!(req->task->flags & PF_EXITING)))
 		io_queue_sqe(req);
 	else
-		io_req_complete_failed(req, -EFAULT);
+		io_req_defer_failed(req, -EFAULT);
 }

@@ -1344,6 +1386,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	io_cq_lock(ctx);
+	/* must come first to preserve CQE ordering in failure cases */
+	if (state->cqes_count)
+		__io_flush_post_cqes(ctx);
 	wq_list_for_each(node, prev, &state->compl_reqs) {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    comp_list);

@@ -1351,10 +1396,12 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		if (!(req->flags & REQ_F_CQE_SKIP))
 			__io_fill_cqe_req(ctx, req);
 	}
-	__io_cq_unlock_post(ctx);
+	io_cq_unlock_post_inline(ctx);
 
-	io_free_batch_list(ctx, state->compl_reqs.first);
-	INIT_WQ_LIST(&state->compl_reqs);
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
+		io_free_batch_list(ctx, state->compl_reqs.first);
+		INIT_WQ_LIST(&state->compl_reqs);
+	}
 }

@@ -1476,16 +1523,10 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
-	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
-		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
-
-		req->cqe.flags |= io_put_kbuf(req, issue_flags);
-	}
-
 	if (*locked)
 		io_req_complete_defer(req);
 	else
-		io_req_complete_post(req);
+		io_req_complete_post(req, IO_URING_F_UNLOCKED);
 }

@@ -1635,6 +1676,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 static __cold void io_drain_req(struct io_kiocb *req)
+	__must_hold(&ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;

@@ -1655,7 +1697,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
 	ret = io_req_prep_async(req);
 	if (ret) {
 fail:
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 	io_prep_async_link(req);

@@ -1752,7 +1794,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 			io_req_complete_defer(req);
 		else
-			io_req_complete_post(req);
+			io_req_complete_post(req, issue_flags);
 	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
 		return ret;

@@ -1768,7 +1810,8 @@ int io_poll_issue(struct io_kiocb *req, bool *locked)
 	if (unlikely(req->task->flags & PF_EXITING))
 		return -EFAULT;
-	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
+	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
+				 IO_URING_F_COMPLETE_DEFER);
 }

@@ -1787,7 +1830,7 @@ void io_wq_submit_work(struct io_wq_work *work)
 	int ret = 0, err = -ECANCELED;
 
-	/* one will be dropped by ->io_free_work() after returning to io-wq */
+	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
 	if (!(req->flags & REQ_F_REFCOUNT))
 		__io_req_set_refcount(req, 2);
 	else

@@ -1885,7 +1928,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}

@@ -1935,14 +1978,14 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
-		io_req_complete_failed(req, req->cqe.res);
+		io_req_defer_failed(req, req->cqe.res);
 	} else if (unlikely(req->ctx->drain_active)) {
 		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
 		if (unlikely(ret))
-			io_req_complete_failed(req, ret);
+			io_req_defer_failed(req, ret);
 		else
 			io_queue_iowq(req, NULL);
 	}

@@ -2670,7 +2713,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
-	 * pushs them to do the flush.
+	 * pushes them to do the flush.
 	 */
 	if (io_cqring_events(ctx) || io_has_work(ctx))

@@ -2869,7 +2912,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 		de = list_first_entry(&list, struct io_defer_entry, list);
 		list_del_init(&de->list);
-		io_req_complete_failed(de->req, -ECANCELED);
+		io_req_task_queue_fail(de->req, -ECANCELED);
 		kfree(de);

@@ -4062,8 +4105,6 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 	ctx = f.file->private_data;
 
-	io_run_task_work_ctx(ctx);
-
 	mutex_lock(&ctx->uring_lock);
 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
 	mutex_unlock(&ctx->uring_lock);
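The io_aux_cqe()/__io_flush_post_cqes() pair above amounts to a small, bounded per-ring buffer of auxiliary CQEs that is flushed under the completion lock either when it fills up or when completions are flushed at submit time. As a rough illustration of that pattern only (made-up names, not kernel code), the control flow looks like this:

/* Illustration of the deferral pattern above; names and types are invented. */
#define BATCH 16

struct cqe { unsigned long long user_data; int res; unsigned flags; };

struct batch_state {
	struct cqe cqes[BATCH];
	unsigned int cqes_count;
};

static void post_one_locked(struct cqe *c) { /* stands in for io_fill_cqe_aux() */ }
static void lock_cq(void)   { /* stands in for io_cq_lock() */ }
static void unlock_cq(void) { /* stands in for dropping completion_lock */ }

static void defer_aux_cqe(struct batch_state *s, struct cqe c)
{
	if (s->cqes_count == BATCH) {		/* buffer full: flush under the lock */
		lock_cq();
		for (unsigned int i = 0; i < s->cqes_count; i++)
			post_one_locked(&s->cqes[i]);
		s->cqes_count = 0;
		unlock_cq();
	}
	s->cqes[s->cqes_count++] = c;		/* otherwise defer; flushed with completions */
}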
io_uring/io_uring.h:

@@ -4,6 +4,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <linux/io_uring_types.h>
+#include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
 #include "slist.h"
 #include "filetable.h"

@@ -29,14 +30,11 @@ bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
-void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
-void io_req_complete_post(struct io_kiocb *req);
-void __io_req_complete_post(struct io_kiocb *req);
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow);
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
+void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+		bool allow_overflow);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

@@ -50,10 +48,9 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 	return req->flags & REQ_F_FIXED_FILE;
 }
 
+void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_is_uring_fops(struct file *file);
 bool io_alloc_async_data(struct io_kiocb *req);
-void io_req_task_work_add(struct io_kiocb *req);
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
 void io_req_task_queue(struct io_kiocb *req);
 void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);

@@ -82,6 +79,11 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+static inline void io_req_task_work_add(struct io_kiocb *req)
+{
+	__io_req_task_work_add(req, true);
+}
+
 #define io_for_each_link(pos, head) \
 	for (pos = (head); pos; pos = pos->link)

@@ -207,12 +209,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
 	/*
-	 * wake_up_all() may seem excessive, but io_wake_function() and
-	 * io_should_wake() handle the termination of the loop and only
-	 * wake as many waiters as we need to.
+	 * Trigger waitqueue handler on all waiters on our waitqueue. This
+	 * won't necessarily wake up all the tasks, io_should_wake() will make
+	 * that decision.
+	 *
+	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
+	 * set in the mask so that if we recurse back into our own poll
+	 * waitqueue handlers, we know we have a dependency between eventfd or
+	 * epoll and should terminate multishot poll at that point.
 	 */
 	if (waitqueue_active(&ctx->cq_wait))
-		wake_up_all(&ctx->cq_wait);
+		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 static inline void io_cqring_wake(struct io_ring_ctx *ctx)

@@ -369,4 +377,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
+static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+{
+	io_req_set_res(req, res, 0);
+	req->io_task_work.func = io_req_task_complete;
+	io_req_task_work_add(req);
+}
+
 #endif
io_uring/kbuf.c:

@@ -306,14 +306,11 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (!bl->buf_nr_pages)
 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
 	}
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
-
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)

@@ -458,13 +455,12 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_add_buffers(ctx, p, bl);
 err:
+	io_ring_submit_unlock(ctx, issue_flags);
+
 	if (ret < 0)
 		req_set_fail(req);
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
io_uring/msg_ring.c:

@@ -31,7 +31,7 @@ static int io_msg_ring_data(struct io_kiocb *req)
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
 
-	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		return 0;
 
 	return -EOVERFLOW;

@@ -116,7 +116,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	 * completes with -EOVERFLOW, then the sender must ensure that a
 	 * later IORING_OP_MSG_RING delivers the message.
 	 */
-	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		ret = -EOVERFLOW;
 out_unlock:
 	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
io_uring/net.c:

@@ -125,13 +125,15 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_cache_entry *entry;
 	struct io_async_msghdr *hdr;
 
-	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
-		hdr = container_of(entry, struct io_async_msghdr, cache);
-		hdr->free_iov = NULL;
-		req->flags |= REQ_F_ASYNC_DATA;
-		req->async_data = hdr;
-		return hdr;
+	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		if (entry) {
+			hdr = container_of(entry, struct io_async_msghdr, cache);
+			hdr->free_iov = NULL;
+			req->flags |= REQ_F_ASYNC_DATA;
+			req->async_data = hdr;
+			return hdr;
+		}
 	}
 
 	if (!io_alloc_async_data(req)) {

@@ -599,16 +601,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	if (!mshot_finished) {
-		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
-				    cflags | IORING_CQE_F_MORE, false)) {
+		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
 			io_recv_prep_retry(req);
 			return false;
 		}
-		/*
-		 * Otherwise stop multishot but use the current result.
-		 * Probably will end up going into overflow, but this means
-		 * we cannot trust the ordering anymore
-		 */
+		/* Otherwise stop multishot but use the current result. */
 	}
 
 	io_req_set_res(req, *ret, cflags);

@@ -923,6 +921,9 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 	}
 }
 
+#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
+#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);

@@ -935,10 +936,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->flags & REQ_F_CQE_SKIP)
 		return -EINVAL;
 
-	zc->flags = READ_ONCE(sqe->ioprio);
-	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
-			  IORING_RECVSEND_FIXED_BUF))
-		return -EINVAL;
 	notif = zc->notif = io_alloc_notif(ctx);
 	if (!notif)
 		return -ENOMEM;

@@ -946,6 +943,17 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	notif->cqe.res = 0;
 	notif->cqe.flags = IORING_CQE_F_NOTIF;
 	req->flags |= REQ_F_NEED_CLEANUP;
+
+	zc->flags = READ_ONCE(sqe->ioprio);
+	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
+		if (zc->flags & ~IO_ZC_FLAGS_VALID)
+			return -EINVAL;
+		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
+			io_notif_set_extended(notif);
+			io_notif_to_data(notif)->zc_report = true;
+		}
+	}
+
 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 		unsigned idx = READ_ONCE(sqe->buf_index);

@@ -1087,6 +1095,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 		msg.sg_from_iter = io_sg_from_iter;
 	} else {
+		io_notif_set_extended(zc->notif);
 		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
 					  &msg.msg_iter);
 		if (unlikely(ret))

@@ -1148,6 +1157,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned flags;
 	int ret, min_ret = 0;
 
+	io_notif_set_extended(sr->notif);
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;

@@ -1307,12 +1318,13 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 		return IOU_OK;
 	}
 
-	if (ret >= 0 &&
-	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+	if (ret < 0)
+		return ret;
+	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
 		goto retry;
 
-	io_req_set_res(req, ret, 0);
-	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
+	return -ECANCELED;
 }
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
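From userspace the multishot contract is unchanged by the deferral above: each accepted connection (or received buffer) still arrives as its own CQE with IORING_CQE_F_MORE set while the request stays armed. A hedged liburing-based sketch, assuming a build that provides io_uring_prep_multishot_accept() and a listening socket listen_fd:

/* Hedged sketch of consuming multishot accept CQEs; error handling trimmed. */
#include <liburing.h>
#include <unistd.h>

static void accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	while (io_uring_wait_cqe(ring, &cqe) == 0) {
		if (cqe->res >= 0)
			close(cqe->res);	/* cqe->res is the new connection fd; handle or close */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* multishot terminated (e.g. -ECANCELED); re-arm if desired */
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}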
io_uring/notif.c:

@@ -9,11 +9,14 @@
 #include "notif.h"
 #include "rsrc.h"
 
-static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
+static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
 {
 	struct io_notif_data *nd = io_notif_to_data(notif);
 	struct io_ring_ctx *ctx = notif->ctx;
 
+	if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
+		notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
+
 	if (nd->account_pages && ctx->user) {
 		__io_unaccount_mem(ctx->user, nd->account_pages);
 		nd->account_pages = 0;

@@ -21,16 +24,41 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
 	io_req_task_complete(notif, locked);
 }
 
-static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
-					  struct ubuf_info *uarg,
-					  bool success)
+static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+				bool success)
 {
 	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
 	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
 
-	if (refcount_dec_and_test(&uarg->refcnt)) {
-		notif->io_task_work.func = __io_notif_complete_tw;
+	if (refcount_dec_and_test(&uarg->refcnt))
 		io_req_task_work_add(notif);
-	}
+}
+
+static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
+				    bool success)
+{
+	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
+
+	if (nd->zc_report) {
+		if (success && !nd->zc_used && skb)
+			WRITE_ONCE(nd->zc_used, true);
+		else if (!success && !nd->zc_copied)
+			WRITE_ONCE(nd->zc_copied, true);
+	}
+	io_tx_ubuf_callback(skb, uarg, success);
+}
+
+void io_notif_set_extended(struct io_kiocb *notif)
+{
+	struct io_notif_data *nd = io_notif_to_data(notif);
+
+	if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
+		nd->account_pages = 0;
+		nd->zc_report = false;
+		nd->zc_used = false;
+		nd->zc_copied = false;
+		nd->uarg.callback = io_tx_ubuf_callback_ext;
+		notif->io_task_work.func = io_notif_complete_tw_ext;
+	}
 }

@@ -49,24 +77,11 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	notif->task = current;
 	io_get_task_refs(1);
 	notif->rsrc_node = NULL;
-	io_req_set_rsrc_node(notif, ctx, 0);
+	notif->io_task_work.func = io_req_task_complete;
 
 	nd = io_notif_to_data(notif);
-	nd->account_pages = 0;
 	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
-	nd->uarg.callback = io_uring_tx_zerocopy_callback;
+	nd->uarg.callback = io_tx_ubuf_callback;
 	refcount_set(&nd->uarg.refcnt, 1);
 	return notif;
 }
-
-void io_notif_flush(struct io_kiocb *notif)
-	__must_hold(&slot->notif->ctx->uring_lock)
-{
-	struct io_notif_data *nd = io_notif_to_data(notif);
-
-	/* drop slot's master ref */
-	if (refcount_dec_and_test(&nd->uarg.refcnt)) {
-		notif->io_task_work.func = __io_notif_complete_tw;
-		io_req_task_work_add(notif);
-	}
-}
io_uring/notif.h:

@@ -13,16 +13,29 @@ struct io_notif_data {
 	struct file		*file;
 	struct ubuf_info	uarg;
 	unsigned long		account_pages;
+	bool			zc_report;
+	bool			zc_used;
+	bool			zc_copied;
 };
 
-void io_notif_flush(struct io_kiocb *notif);
 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
+void io_notif_set_extended(struct io_kiocb *notif);
 
 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
 {
 	return io_kiocb_to_cmd(notif, struct io_notif_data);
 }
 
+static inline void io_notif_flush(struct io_kiocb *notif)
+	__must_hold(&notif->ctx->uring_lock)
+{
+	struct io_notif_data *nd = io_notif_to_data(notif);
+
+	/* drop slot's master ref */
+	if (refcount_dec_and_test(&nd->uarg.refcnt))
+		io_req_task_work_add(notif);
+}
+
 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
 {
 	struct io_ring_ctx *ctx = notif->ctx;
io_uring/poll.c:

@@ -280,16 +280,14 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 		if (req->apoll_events & EPOLLONESHOT)
 			return IOU_POLL_DONE;
-		if (io_is_uring_fops(req->file))
-			return IOU_POLL_DONE;
 
 		/* multishot, just fill a CQE and proceed */
 		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
-					     mask, IORING_CQE_F_MORE, false)) {
+			if (!io_aux_cqe(ctx, *locked, req->cqe.user_data,
+					mask, IORING_CQE_F_MORE, false)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}

@@ -345,26 +343,22 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret == IOU_POLL_NO_ACTION)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, locked);
 
 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
 	io_req_set_res(req, mask, 0);
-	/*
-	 * This is useful for poll that is armed on behalf of another
-	 * request, and where the wakeup path could be on a different
-	 * CPU. We want to avoid pulling in req->apoll->events for that
-	 * case.
-	 */
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else

@@ -429,6 +423,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
 		/* optional, saves extra locking for removal in tw handler */
 		if (mask && poll->events & EPOLLONESHOT) {
 			list_del_init(&poll->wait.entry);

@@ -648,10 +650,13 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
-	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
+	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		if (entry == NULL)
+			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
 	} else {
+alloc_apoll:
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
 			return NULL;
io_uring/rsrc.c:

@@ -170,10 +170,10 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 		if (prsrc->tag) {
 			if (ctx->flags & IORING_SETUP_IOPOLL) {
 				mutex_lock(&ctx->uring_lock);
-				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
+				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
 				mutex_unlock(&ctx->uring_lock);
 			} else {
-				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
+				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
 			}
 		}

@@ -321,6 +321,11 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		if (atomic_dec_and_test(&data->refs))
 			break;
 		mutex_unlock(&ctx->uring_lock);
+
+		ret = io_run_task_work_sig(ctx);
+		if (ret < 0)
+			goto reinit;
+
 		flush_delayed_work(&ctx->rsrc_put_work);
 		ret = wait_for_completion_interruptible(&data->done);
 		if (!ret) {

@@ -336,12 +341,12 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 			}
 		}
 
+reinit:
 		atomic_inc(&data->refs);
 		/* wait for all works potentially completing data->done */
 		flush_delayed_work(&ctx->rsrc_put_work);
 		reinit_completion(&data->done);
 
-		ret = io_run_task_work_sig(ctx);
 		mutex_lock(&ctx->uring_lock);
 	} while (ret >= 0);
 	data->quiesce = false;
io_uring/rw.c:

@@ -286,6 +286,12 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 {
 	io_req_io_end(req);
+
+	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
+		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+
+		req->cqe.flags |= io_put_kbuf(req, issue_flags);
+	}
 	io_req_task_complete(req, locked);
 }
io_uring/timeout.c:

@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			   atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&timeout->list);
-		io_req_tw_post_queue(req, status, 0);
+		io_req_queue_tw_complete(req, status);
 		return true;
 	}
 	return false;

@@ -159,7 +159,7 @@ void io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 		}
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;

@@ -168,7 +168,7 @@ void io_disarm_next(struct io_kiocb *req)
 		link = io_disarm_linked_timeout(req);
 		spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 	}
 	if (unlikely((req->flags & REQ_F_FAIL) &&
 		     !(req->flags & REQ_F_HARDLINK)))

@@ -282,11 +282,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
 		}
 		io_req_set_res(req, ret ?: -ETIME, 0);
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 		io_put_req(prev);
 	} else {
 		io_req_set_res(req, -ETIME, 0);
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	}
 }
io_uring/uring_cmd.c:

@@ -56,7 +56,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
 	else
-		__io_req_complete(req, 0);
+		io_req_complete_post(req, 0);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);