io_uring: add io_local_work_pending()

In preparation for adding a new llist of task work (tw) to retry after
hitting the tw limit, add a helper io_local_work_pending(). This function
returns true if any local tw is pending. For now it checks only
ctx->work_llist.

Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/20241120221452.3762588-2-dw@davidwei.uk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
David Wei <dw@davidwei.uk>, 2024-11-20 14:14:51 -08:00; committed by Jens Axboe
commit 40cfe55324, parent 2ae6bdb1e1
2 changed files with 14 additions and 9 deletions
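For orientation, here is the helper in isolation, as introduced by the
io_uring/io_uring.h hunk below. Centralizing the open-coded llist_empty()
check is what lets the planned retry llist be folded in later without
touching any converted call site:

/*
 * io_local_work_pending(): true if any local task work is queued on
 * this ring. Currently only ctx->work_llist feeds it; the retry llist
 * planned by this series can later be OR-ed in here, leaving every
 * caller below unchanged.
 */
static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist);
}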

--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -1261,7 +1261,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
 				       int min_events)
 {
-	if (llist_empty(&ctx->work_llist))
+	if (!io_local_work_pending(ctx))
 		return false;
 	if (events < min_events)
 		return true;
@@ -1314,7 +1314,7 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 {
 	struct io_tw_state ts = {};
 
-	if (llist_empty(&ctx->work_llist))
+	if (!io_local_work_pending(ctx))
 		return 0;
 	return __io_run_local_work(ctx, &ts, min_events);
 }
@@ -2329,7 +2329,7 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 
 int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
-	if (!llist_empty(&ctx->work_llist)) {
+	if (io_local_work_pending(ctx)) {
 		__set_current_state(TASK_RUNNING);
 		if (io_run_local_work(ctx, INT_MAX) > 0)
 			return 0;
@@ -2459,7 +2459,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 {
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
-	if (unlikely(!llist_empty(&ctx->work_llist)))
+	if (unlikely(io_local_work_pending(ctx)))
 		return 1;
 	if (unlikely(task_work_pending(current)))
 		return 1;
@@ -2493,7 +2493,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
-	if (!llist_empty(&ctx->work_llist))
+	if (io_local_work_pending(ctx))
 		io_run_local_work(ctx, min_events);
 	io_run_task_work();
 
@@ -2564,7 +2564,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 		/*
 		 * If we got woken because of task_work being processed, run it
 		 * now rather than let the caller do another wait loop.
 		 */
-		if (!llist_empty(&ctx->work_llist))
+		if (io_local_work_pending(ctx))
 			io_run_local_work(ctx, nr_wait);
 		io_run_task_work();
@@ -3158,7 +3158,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 		io_run_task_work();
 		io_uring_drop_tctx_refs(current);
 		xa_for_each(&tctx->xa, index, node) {
-			if (!llist_empty(&node->ctx->work_llist)) {
+			if (io_local_work_pending(node->ctx)) {
 				WARN_ON_ONCE(node->ctx->submitter_task &&
 					     node->ctx->submitter_task != current);
 				goto end_wait;

--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h

@@ -347,9 +347,14 @@ static inline int io_run_task_work(void)
 	return ret;
 }
 
+static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
+{
+	return !llist_empty(&ctx->work_llist);
+}
+
 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 {
-	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+	return task_work_pending(current) || io_local_work_pending(ctx);
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
@@ -484,6 +489,6 @@ enum {
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
-	       !llist_empty(&ctx->work_llist);
+	       io_local_work_pending(ctx);
 }
 #endif