mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
nvme: core: don't hold rcu read lock in nvme_ns_chr_uring_cmd_iopoll
nvme_ns_chr_uring_cmd_iopoll() has switched to request-based io polling, and the associated namespace is guaranteed to be live while polling, so the request is guaranteed to be valid because blk-mq uses a pre-allocated request pool.
Remove the rcu read lock in nvme_ns_chr_uring_cmd_iopoll(); it is no longer needed after the switch to request-based io polling.
This also fixes "BUG: sleeping function called from invalid context", since set_page_dirty_lock() called from blk_rq_unmap_user() may sleep.
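As an illustration of why that splat fires, here is a minimal annotated sketch of the pre-patch polling pattern (the statements mirror the removed lines in the diff below; the annotated call chain is inferred from the report above, not a verbatim trace):

	/* Pre-patch body of nvme_ns_chr_uring_cmd_iopoll(): */
	rcu_read_lock();		/* RCU read-side section: sleeping is not allowed */
	req = READ_ONCE(ioucmd->cookie);
	if (req && blk_rq_is_poll(req))
		/*
		 * Polling may complete the request right here, and the
		 * completion path can reach blk_rq_unmap_user() ->
		 * set_page_dirty_lock(), which may sleep -- hence
		 * "BUG: sleeping function called from invalid context".
		 */
		ret = blk_rq_poll(req, iob, poll_flags);
	rcu_read_unlock();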
Fixes: 585079b6e4 ("nvme: wire up async polling for io passthrough commands")
Reported-by: Guangwu Zhang <guazhang@redhat.com>
Cc: Kanchan Joshi <joshi.k@samsung.com>
Cc: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Guangwu Zhang <guazhang@redhat.com>
Link: https://lore.kernel.org/r/20230809020440.174682-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit a7a7dabb5d
parent f099a108ca
@@ -786,11 +786,9 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
 	if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
 		return 0;
 
-	rcu_read_lock();
 	req = READ_ONCE(ioucmd->cookie);
 	if (req && blk_rq_is_poll(req))
 		ret = blk_rq_poll(req, iob, poll_flags);
-	rcu_read_unlock();
 	return ret;
 }
 #ifdef CONFIG_NVME_MULTIPATH
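For reference, a minimal sketch of nvme_ns_chr_uring_cmd_iopoll() as it reads after this patch, reconstructed from the hunk above; the local variable declarations are assumed since they sit outside the diff context:

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct request *req;
	int ret = 0;

	if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
		return 0;

	/*
	 * With request-based io polling the request comes from blk-mq's
	 * pre-allocated pool and stays valid here, so no RCU read lock is
	 * needed around the poll, and the completion path is free to sleep.
	 */
	req = READ_ONCE(ioucmd->cookie);
	if (req && blk_rq_is_poll(req))
		ret = blk_rq_poll(req, iob, poll_flags);
	return ret;
}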