io_uring: do ctx sqd ejection in a clear context
WARNING: CPU: 1 PID: 27907 at fs/io_uring.c:7147 io_sq_thread_park+0xb5/0xd0 fs/io_uring.c:7147
CPU: 1 PID: 27907 Comm: iou-sqp-27905 Not tainted 5.12.0-rc4-syzkaller #0
RIP: 0010:io_sq_thread_park+0xb5/0xd0 fs/io_uring.c:7147
Call Trace:
 io_ring_ctx_wait_and_kill+0x214/0x700 fs/io_uring.c:8619
 io_uring_release+0x3e/0x50 fs/io_uring.c:8646
 __fput+0x288/0x920 fs/file_table.c:280
 task_work_run+0xdd/0x1a0 kernel/task_work.c:140
 io_run_task_work fs/io_uring.c:2238 [inline]
 io_run_task_work fs/io_uring.c:2228 [inline]
 io_uring_try_cancel_requests+0x8ec/0xc60 fs/io_uring.c:8770
 io_uring_cancel_sqpoll+0x1cf/0x290 fs/io_uring.c:8974
 io_sqpoll_cancel_cb+0x87/0xb0 fs/io_uring.c:8907
 io_run_task_work_head+0x58/0xb0 fs/io_uring.c:1961
 io_sq_thread+0x3e2/0x18d0 fs/io_uring.c:6763
 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294

It may happen that the last ctx ref is dropped in io_uring_cancel_sqpoll(), so the fput callback (i.e. io_uring_release()) is enqueued through task_work and run by that same cancellation. As it's deeply nested, we can't park or take sqd->lock there because the sqd state is unclear. So avoid ejecting the ctx from the sqd list in io_ring_ctx_wait_and_kill() and do it in a clear context in io_ring_exit_work() instead.

Fixes: f6d54255f423 ("io_uring: halt SQO submission on ctx exit")
Reported-by: syzbot+e3a3f84f5cecf61f0583@syzkaller.appspotmail.com
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e90df88b8ff2cabb14a7534601d35d62ab4cb8c7.1616496707.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit a185f1db59 (parent d81269fecb)
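For context, the warning in the trace comes from the sanity check in the parking helper, which must not be called from the SQPOLL thread itself. Below is a minimal sketch of io_sq_thread_park() as it looks around v5.12-rc, reconstructed for illustration rather than quoted from the tree; the park_pending/state bookkeeping details are assumptions, the key line is the WARN_ON_ONCE.

	/* Illustrative sketch only, not the exact upstream source. */
	static void io_sq_thread_park(struct io_sq_data *sqd)
		__acquires(&sqd->lock)
	{
		/*
		 * Parking from the SQPOLL thread itself is invalid: this is the
		 * check that fires when io_ring_ctx_wait_and_kill() runs nested
		 * inside the SQPOLL thread's own cancellation, as in the trace.
		 */
		WARN_ON_ONCE(sqd->thread == current);

		atomic_inc(&sqd->park_pending);
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
		mutex_lock(&sqd->lock);
		if (sqd->thread)
			wake_up_process(sqd->thread);
	}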
fs/io_uring.c
@@ -8564,6 +8564,14 @@ static void io_ring_exit_work(struct work_struct *work)
 	struct io_tctx_node *node;
 	int ret;
 
+	/* prevent SQPOLL from submitting new requests */
+	if (ctx->sq_data) {
+		io_sq_thread_park(ctx->sq_data);
+		list_del_init(&ctx->sqd_list);
+		io_sqd_update_thread_idle(ctx->sq_data);
+		io_sq_thread_unpark(ctx->sq_data);
+	}
+
 	/*
 	 * If we're doing polled IO and end up having requests being
 	 * submitted async (out-of-line), then completions can come in while
@@ -8615,14 +8623,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_unregister_personality(ctx, index);
 	mutex_unlock(&ctx->uring_lock);
 
-	/* prevent SQPOLL from submitting new requests */
-	if (ctx->sq_data) {
-		io_sq_thread_park(ctx->sq_data);
-		list_del_init(&ctx->sqd_list);
-		io_sqd_update_thread_idle(ctx->sq_data);
-		io_sq_thread_unpark(ctx->sq_data);
-	}
-
 	io_kill_timeouts(ctx, NULL, NULL);
 	io_poll_remove_all(ctx, NULL, NULL);
 
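A note on the moved hunk: list_del_init() takes the ctx off the sqd list while the thread is parked, and io_sqd_update_thread_idle() then recomputes the idle timeout from the contexts that remain attached. A minimal sketch of that helper, assuming v5.12-era field names (sqd->ctx_list, ctx->sq_thread_idle), not quoted from the tree:

	/* Illustrative sketch, assuming v5.12-era field names. */
	static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
	{
		struct io_ring_ctx *ctx;
		unsigned sq_thread_idle = 0;

		/* the idle period is the longest one requested by any attached ctx */
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
		sqd->sq_thread_idle = sq_thread_idle;
	}

Doing the ejection from io_ring_exit_work(), a workqueue context, means the park/unpark pair always runs outside the SQPOLL thread, so the parking warning can no longer trigger during release.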