io_uring: defer all io_req_complete_failed

All failures now happen under the lock and can therefore be deferred. To stay
consistent when a failure happens after some multishot CQEs have already been
deferred (and to preserve CQE ordering), always defer failures.

To make this obvious at the call sites (and to help prevent a future bug),
rename io_req_complete_failed to io_req_defer_failed.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20221124093559.3780686-4-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Dylan Yudaken <dylany@meta.com>
Date:      2022-11-24 01:35:53 -08:00
Committer: Jens Axboe <axboe@kernel.dk>
commit 973fc83f3a (parent c06c6c5d27)
3 changed files, 10 insertions(+), 11 deletions(-)
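
For orientation, the failure helper after this patch ends up looking roughly like the sketch below. It is assembled from the first two io_uring.c hunks that follow; the lines between those two hunks (the lockdep assert and req_set_fail()) are recalled from the surrounding source of that era and are an approximation, not a verbatim copy. The key change is the last line: instead of posting the CQE immediately with io_req_complete_post(), the request is queued with io_req_complete_defer(), so its CQE is flushed later, in order, under uring_lock.

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	/* approximation: these lines sit between the two hunks shown below */
	lockdep_assert_held(&req->ctx->uring_lock);
	req_set_fail(req);

	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	/* defer the completion instead of posting the CQE right away */
	io_req_complete_defer(req);
}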

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -864,7 +864,7 @@ void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	}
 }
 
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -875,7 +875,7 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res)
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
-	io_req_complete_post(req, 0);
+	io_req_complete_defer(req);
 }
 
 /*
@@ -1231,9 +1231,8 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
-	/* not needed for normal modes, but SQPOLL depends on it */
 	io_tw_lock(req->ctx, locked);
-	io_req_complete_failed(req, req->cqe.res);
+	io_req_defer_failed(req, req->cqe.res);
 }
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
@@ -1243,7 +1242,7 @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
 	if (likely(!(req->task->flags & PF_EXITING)))
 		io_queue_sqe(req);
 	else
-		io_req_complete_failed(req, -EFAULT);
+		io_req_defer_failed(req, -EFAULT);
 }
 
 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -1630,7 +1629,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
 	ret = io_req_prep_async(req);
 	if (ret) {
 fail:
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 	io_prep_async_link(req);
@@ -1860,7 +1859,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	struct io_kiocb *linked_timeout;
 
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
@@ -1910,14 +1909,14 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		 */
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
-		io_req_complete_failed(req, req->cqe.res);
+		io_req_defer_failed(req, req->cqe.res);
 	} else if (unlikely(req->ctx->drain_active)) {
 		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
 		if (unlikely(ret))
-			io_req_complete_failed(req, ret);
+			io_req_defer_failed(req, ret);
 		else
 			io_queue_iowq(req, NULL);
 	}

diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h

@@ -30,7 +30,7 @@ bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
 		     bool allow_overflow);

diff --git a/io_uring/poll.c b/io_uring/poll.c
--- a/io_uring/poll.c
+++ b/io_uring/poll.c

@@ -317,7 +317,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
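
To make the ordering argument concrete: io_req_complete_defer() is (approximately) the small inline helper below, recalled from the io_uring headers of this period; the field and helper names should be treated as an approximation rather than a verbatim copy. It only appends the request to the per-ring deferred completion list while uring_lock is held; __io_submit_flush_completions() later posts the queued CQEs in FIFO order, which is why deferring a failure keeps it ordered after any multishot CQEs that were deferred before it.

static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	/* append to the deferred completion list; flushed in queue order later */
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}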