null_blk: fix poll request timeout handling
When running an io_uring benchmark on /dev/nullb0, it is easy to crash the
kernel once poll request timeouts are triggered, as reported by David. [1]
BUG: kernel NULL pointer dereference, address: 0000000000000008
Workqueue: kblockd blk_mq_timeout_work
RIP: 0010:null_timeout_rq+0x4e/0x91
Call Trace:
? null_timeout_rq+0x4e/0x91
blk_mq_handle_expired+0x31/0x4b
bt_iter+0x68/0x84
? bt_tags_iter+0x81/0x81
__sbitmap_for_each_set.constprop.0+0xb0/0xf2
? __blk_mq_complete_request_remote+0xf/0xf
bt_for_each+0x46/0x64
? __blk_mq_complete_request_remote+0xf/0xf
? percpu_ref_get_many+0xc/0x2a
blk_mq_queue_tag_busy_iter+0x14d/0x18e
blk_mq_timeout_work+0x95/0x127
process_one_work+0x185/0x263
worker_thread+0x1b5/0x227
This is indeed a race problem between null_timeout_rq() and null_poll().
null_poll()                                  null_timeout_rq()
  spin_lock(&nq->poll_lock)
  list_splice_init(&nq->poll_list, &list)
  spin_unlock(&nq->poll_lock)

  while (!list_empty(&list))
    req = list_first_entry()
    list_del_init()
    ...
    blk_mq_add_to_batch()
    // req->rq_next = NULL
                                             spin_lock(&nq->poll_lock)
                                             // rq->queuelist->next == NULL
                                             list_del_init(&rq->queuelist)
                                             spin_unlock(&nq->poll_lock)
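The faulting address in the oops (0000000000000008) is that unlink: rq->rq_next
shares storage with rq->queuelist, so after blk_mq_add_to_batch() the timeout
side sees queuelist.next == NULL, and the first store done by the list unlink,
next->prev = prev, lands 8 bytes past a NULL pointer. A minimal userspace sketch
of that arithmetic (plain C for illustration only, not the kernel code;
struct list_head is re-declared locally):

#include <stdio.h>
#include <stddef.h>

/* Same layout as the kernel's struct list_head on a 64-bit machine. */
struct list_head {
	struct list_head *next, *prev;
};

int main(void)
{
	/* State of rq->queuelist after blk_mq_add_to_batch() in the race. */
	struct list_head queuelist = { .next = NULL, .prev = &queuelist };

	/*
	 * list_del_init() boils down to __list_del(entry->prev, entry->next),
	 * whose first store is "next->prev = prev".  With next == NULL that
	 * write hits offsetof(struct list_head, prev) == 8, which lines up
	 * with the oops address above.
	 */
	printf("faulting write would land at %p + %zu\n",
	       (void *)queuelist.next, offsetof(struct list_head, prev));
	return 0;
}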
Fix these problems by setting the request state to MQ_RQ_COMPLETE under
nq->poll_lock protection, so that null_timeout_rq() can safely detect
the race and return early.
Note this patch only fixes the kernel panic when request timeouts happen.
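A minimal userspace analogy of the fixed synchronization, assuming nothing
beyond the commit message and the diff below (a pthread mutex stands in for
nq->poll_lock, a bool for MQ_RQ_COMPLETE; all names here are illustrative,
not the kernel API): the poll side marks the request complete while still
holding the lock, and the timeout side checks that flag under the same lock
before touching the list.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel objects involved in the race. */
struct fake_rq {
	bool complete;			/* plays the role of MQ_RQ_COMPLETE */
	struct fake_rq *next;		/* plays the role of rq->queuelist */
};

static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_rq rq;
static struct fake_rq *poll_list = &rq;	/* single-entry poll list */

/* null_poll() analogue: splice the list and mark every request complete
 * before dropping the lock, so a concurrent timeout can detect it. */
static void *poll_side(void *unused)
{
	pthread_mutex_lock(&poll_lock);
	struct fake_rq *list = poll_list;
	poll_list = NULL;
	for (struct fake_rq *r = list; r; r = r->next)
		r->complete = true;	/* blk_mq_set_request_complete() */
	pthread_mutex_unlock(&poll_lock);
	/* actual completion work would happen here, outside the lock */
	return NULL;
}

/* null_timeout_rq() analogue: under the same lock, a completed request
 * already belongs to the poll side, so bail out instead of unlinking it. */
static void *timeout_side(void *unused)
{
	pthread_mutex_lock(&poll_lock);
	if (rq.complete) {		/* blk_mq_request_completed() */
		pthread_mutex_unlock(&poll_lock);
		puts("timeout: already completed -> BLK_EH_DONE");
		return NULL;
	}
	poll_list = rq.next;		/* safe: rq is still on the list */
	pthread_mutex_unlock(&poll_lock);
	puts("timeout: request removed from poll list");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, poll_side, NULL);
	pthread_create(&b, NULL, timeout_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}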
[1] https://lore.kernel.org/all/3893581.1691785261@warthog.procyon.org.uk/
Fixes: 0a593fbbc2 ("null_blk: poll queue support")
Reported-by: David Howells <dhowells@redhat.com>
Tested-by: David Howells <dhowells@redhat.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Link: https://lore.kernel.org/r/20230901120306.170520-2-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5a26e45edb
parent f7cf224246
@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	struct nullb_queue *nq = hctx->driver_data;
 	LIST_HEAD(list);
 	int nr = 0;
+	struct request *rq;
 
 	spin_lock(&nq->poll_lock);
 	list_splice_init(&nq->poll_list, &list);
+	list_for_each_entry(rq, &list, queuelist)
+		blk_mq_set_request_complete(rq);
 	spin_unlock(&nq->poll_lock);
 
 	while (!list_empty(&list)) {
@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	pr_info("rq %p timed out\n", rq);
-
 	if (hctx->type == HCTX_TYPE_POLL) {
 		struct nullb_queue *nq = hctx->driver_data;
 
 		spin_lock(&nq->poll_lock);
+		/* The request may have completed meanwhile. */
+		if (blk_mq_request_completed(rq)) {
+			spin_unlock(&nq->poll_lock);
+			return BLK_EH_DONE;
+		}
 		list_del_init(&rq->queuelist);
 		spin_unlock(&nq->poll_lock);
 	}
 
+	pr_info("rq %p timed out\n", rq);
+
 	/*
 	 * If the device is marked as blocking (i.e. memory backed or zoned
 	 * device), the submission path may be blocked waiting for resources