Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Synced 2025-01-06 14:05:39 +00:00
io_uring: optimise out unlikely link queue
__io_queue_sqe() tries to issue as many requests of a link as it can, and uses io_put_req_find_next() to extract the next one, targeting inline-completed requests. Now that __io_queue_sqe() is always used together with struct io_comp_state, this next-propagation only has a small window, and only for async reqs, which doesn't justify its existence. Remove it and make __io_queue_sqe() issue only the head request. This simplifies the code and will allow other optimisations.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent bd75904590
commit d3d7298d05
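Before the diff, a brief userspace sketch of the flow the commit message describes may help. This is not the kernel code: names such as toy_req, toy_comp_state, BATCH_MAX, queue_head() and flush_completions() are invented for illustration. The idea is to issue only the head request, stash an inline completion in a small fixed-size batch, and let the submit path flush that batch, instead of walking the link with io_put_req_find_next().

#include <stdio.h>

#define BATCH_MAX 32

struct toy_req {
	int id;
	int completed_inline;		/* stands in for REQ_F_COMPLETE_INLINE */
};

struct toy_comp_state {
	struct toy_req *reqs[BATCH_MAX];
	unsigned int nr;
};

static void flush_completions(struct toy_comp_state *cs)
{
	for (unsigned int i = 0; i < cs->nr; i++)
		printf("post completion for req %d\n", cs->reqs[i]->id);
	cs->nr = 0;
}

/* Post-patch shape: issue only the head request of a link. */
static void queue_head(struct toy_req *req, struct toy_comp_state *cs)
{
	/* ...the io_issue_sqe() equivalent would run here... */

	if (req->completed_inline) {
		cs->reqs[cs->nr++] = req;
		/* the kernel derives this bound via ARRAY_SIZE(cs->reqs) */
		if (cs->nr == BATCH_MAX)
			flush_completions(cs);
	}
	/* no goto/io_put_req_find_next() walk over the rest of the link */
}

int main(void)
{
	struct toy_comp_state cs = { .nr = 0 };
	struct toy_req head = { .id = 1, .completed_inline = 1 };

	queue_head(&head, &cs);
	flush_completions(&cs);		/* submit path flushes what is left */
	return 0;
}

The real function also handles async punting, credentials and linked timeouts, which this sketch omits.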
fs/io_uring.c
@@ -6563,26 +6563,20 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 
 static void __io_queue_sqe(struct io_kiocb *req)
 {
-	struct io_kiocb *linked_timeout;
+	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
 	const struct cred *old_creds = NULL;
 	int ret;
 
-again:
-	linked_timeout = io_prep_linked_timeout(req);
-
 	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
 	    (req->work.flags & IO_WQ_WORK_CREDS) &&
-	    req->work.identity->creds != current_cred()) {
-		if (old_creds)
-			revert_creds(old_creds);
-		if (old_creds == req->work.identity->creds)
-			old_creds = NULL; /* restored original creds */
-		else
-			old_creds = override_creds(req->work.identity->creds);
-	}
+	    req->work.identity->creds != current_cred())
+		old_creds = override_creds(req->work.identity->creds);
 
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
+	if (old_creds)
+		revert_creds(old_creds);
+
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
@@ -6595,9 +6589,6 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			 */
 			io_queue_async_work(req);
 		}
-
-		if (linked_timeout)
-			io_queue_linked_timeout(linked_timeout);
 	} else if (likely(!ret)) {
 		/* drop submission reference */
 		if (req->flags & REQ_F_COMPLETE_INLINE) {
@@ -6605,31 +6596,18 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			struct io_comp_state *cs = &ctx->submit_state.comp;
 
 			cs->reqs[cs->nr++] = req;
-			if (cs->nr == IO_COMPL_BATCH)
+			if (cs->nr == ARRAY_SIZE(cs->reqs))
 				io_submit_flush_completions(cs, ctx);
-			req = NULL;
 		} else {
-			req = io_put_req_find_next(req);
-		}
-
-		if (linked_timeout)
-			io_queue_linked_timeout(linked_timeout);
-
-		if (req) {
-			if (!(req->flags & REQ_F_FORCE_ASYNC))
-				goto again;
-			io_queue_async_work(req);
+			io_put_req(req);
 		}
 	} else {
-		/* un-prep timeout, so it'll be killed as any other linked */
-		req->flags &= ~REQ_F_LINK_TIMEOUT;
 		req_set_fail_links(req);
 		io_put_req(req);
 		io_req_complete(req, ret);
 	}
-
-	if (old_creds)
-		revert_creds(old_creds);
+	if (linked_timeout)
+		io_queue_linked_timeout(linked_timeout);
 }
 
 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
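One hunk above swaps the IO_COMPL_BATCH constant for ARRAY_SIZE(cs->reqs), so the flush threshold is derived from the array itself. A minimal userspace illustration of that idiom follows; the ARRAY_SIZE macro here is a plain approximation of the kernel's (which adds a type check), and comp_batch, add_completion() and flush() are made-up names.

#include <stdio.h>

/* Same idea as the kernel's ARRAY_SIZE(): element count of a real array. */
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct comp_batch {
	int reqs[8];
	unsigned int nr;
};

static void flush(struct comp_batch *b)
{
	printf("flushing %u completions\n", b->nr);
	b->nr = 0;
}

static void add_completion(struct comp_batch *b, int req)
{
	b->reqs[b->nr++] = req;
	/* The bound comes from the array itself, so resizing reqs[] cannot
	 * silently desynchronise the flush threshold. */
	if (b->nr == ARRAY_SIZE(b->reqs))
		flush(b);
}

int main(void)
{
	struct comp_batch b = { .nr = 0 };

	for (int i = 0; i < 20; i++)
		add_completion(&b, i);
	if (b.nr)
		flush(&b);
	return 0;
}

Resizing reqs[] then moves the flush point automatically, with no separate constant to keep in sync.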