From 71a7e2f5b6670c5ea201210ac8c44158b504de49 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 18 Mar 2024 22:00:28 +0000
Subject: [PATCH] io_uring/rw: avoid punting to io-wq directly

Commit 6e6b8c62120a22acd8cb759304e4cd2e3215d488 upstream.

kiocb_done() shouldn't need to specifically redirect requests to io-wq.
Remove the hop to task work (tw) that then queues io-wq; return -EAGAIN
instead and let the core io_uring code handle the offloading.

Signed-off-by: Pavel Begunkov
Tested-by: Ming Lei
Link: https://lore.kernel.org/r/413564e550fe23744a970e1783dfa566291b0e6f.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
(cherry picked from commit 6e6b8c62120a22acd8cb759304e4cd2e3215d488)
Signed-off-by: Greg Kroah-Hartman
---
 io_uring/io_uring.c | 6 +++---
 io_uring/io_uring.h | 1 -
 io_uring/rw.c       | 8 +-------
 3 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 10070cd867b4..9b58ba4616d4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -434,7 +434,7 @@ static void io_prep_async_link(struct io_kiocb *req)
 	}
 }
 
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
+static void io_queue_iowq(struct io_kiocb *req)
 {
 	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->task->io_uring;
@@ -1913,7 +1913,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 		break;
 	case IO_APOLL_ABORTED:
 		io_kbuf_recycle(req, 0);
-		io_queue_iowq(req, NULL);
+		io_queue_iowq(req);
 		break;
 	case IO_APOLL_OK:
 		break;
@@ -1962,7 +1962,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		if (unlikely(req->ctx->drain_active))
 			io_drain_req(req);
 		else
-			io_queue_iowq(req, NULL);
+			io_queue_iowq(req);
 	}
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 3b87f5421eb6..a1f679b8199e 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -54,7 +54,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, bool *locked);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b32395d872c6..692663bd864f 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -167,12 +167,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 	return NULL;
 }
 
-static void io_req_task_queue_reissue(struct io_kiocb *req)
-{
-	req->io_task_work.func = io_queue_iowq;
-	io_req_task_work_add(req);
-}
-
 #ifdef CONFIG_BLOCK
 static bool io_resubmit_prep(struct io_kiocb *req)
 {
@@ -341,7 +335,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 	if (req->flags & REQ_F_REISSUE) {
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req))
-			io_req_task_queue_reissue(req);
+			return -EAGAIN;
 		else
 			io_req_task_queue_fail(req, final_ret);
 	}
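
For readers tracing the change, the short userspace sketch below models the resulting control flow under the assumption stated in the commit message: the completion path merely returns -EAGAIN, and the core issue path decides to punt to io-wq. All model_* names, MODEL_EAGAIN, and the program structure are hypothetical illustrations, not kernel code and not the io_uring API.

/*
 * Illustrative userspace model of the control-flow change; every
 * model_* name is hypothetical, not a kernel function.
 */
#include <stdio.h>

#define MODEL_EAGAIN 11

struct model_req {
	int needs_reissue;	/* stand-in for REQ_F_REISSUE being set */
};

/* Stand-in for punting a request to the io-wq worker pool. */
static void model_queue_iowq(struct model_req *req)
{
	(void)req;
	printf("offloaded to io-wq\n");
}

/*
 * Completion path.  Before the patch it arranged the io-wq punt itself
 * (via an extra task-work hop); after the patch it only reports -EAGAIN.
 */
static int model_kiocb_done(struct model_req *req)
{
	if (req->needs_reissue)
		return -MODEL_EAGAIN;
	return 0;
}

/* Core issue path: it, not the completion code, performs the offload. */
static void model_core_issue(struct model_req *req)
{
	if (model_kiocb_done(req) == -MODEL_EAGAIN)
		model_queue_iowq(req);
}

int main(void)
{
	struct model_req req = { .needs_reissue = 1 };

	model_core_issue(&req);
	return 0;
}

Running the model prints "offloaded to io-wq", mirroring how a reissued request now reaches io-wq through the issuing code rather than through kiocb_done() itself.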