mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-16 21:35:07 +00:00
blk-ioc: fix recursive spin_lock/unlock_irq() in ioc_clear_queue()
Recursive spin_lock/unlock_irq() is not safe, because spin_unlock_irq() will
enable irq unconditionally:

  spin_lock_irq  queue_lock    -> disable irq
  spin_lock_irq  ioc->lock
  spin_unlock_irq ioc->lock    -> enable irq
  /*
   * AA dead lock will be triggered if current context is preempted by irq,
   * and irq try to hold queue_lock again.
   */
  spin_unlock_irq queue_lock

Fix this problem by using spin_lock/unlock() directly for 'ioc->lock'.

Fixes: 5a0ac57c48aa ("blk-ioc: protect ioc_destroy_icq() by 'queue_lock'")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230606011438.3743440-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
f12bc113ce
commit
a7cfa0af0c
@@ -179,9 +179,9 @@ void ioc_clear_queue(struct request_queue *q)
|
||||
* Other context won't hold ioc lock to wait for queue_lock, see
|
||||
* details in ioc_release_fn().
|
||||
*/
|
||||
spin_lock_irq(&icq->ioc->lock);
|
||||
spin_lock(&icq->ioc->lock);
|
||||
ioc_destroy_icq(icq);
|
||||
spin_unlock_irq(&icq->ioc->lock);
|
||||
spin_unlock(&icq->ioc->lock);
|
||||
}
|
||||
spin_unlock_irq(&q->queue_lock);
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user