block: track disk DEAD state automatically for modeling queue freeze lockdep

Now we only verify the outermost freeze & unfreeze in the current context,
i.e. when !q->mq_freeze_depth, so it is reliable to save the disk DEAD state
when we want to lock the freeze queue, since the state is a per-task
variable now.

Doing it this way kills lots of false positives when the queue is frozen
before the disk is added [1].

[1] https://lore.kernel.org/linux-block/6741f6b2.050a0220.1cc393.0017.GAE@google.com/

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241127135133.3952153-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Ming Lei, 2024-11-27 21:51:28 +08:00; committed by Jens Axboe
parent e19422d1e4
commit b06cdcda0c
5 changed files with 24 additions and 12 deletions
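As a minimal illustration of the idea (a userspace sketch, not kernel code:
struct queue, freeze(), unfreeze() and disk_dead_now() are hypothetical
stand-ins for the real blk-mq objects), the model below shows why the disk
DEAD test has to be evaluated once, when the outermost freeze takes
ownership, and its cached result replayed at unfreeze time: re-evaluating it
on the unfreeze side could disagree with the freeze side and unbalance the
lockdep annotations whenever the disk is added or marked dead while the
queue is frozen.

#include <stdbool.h>
#include <stdio.h>

struct disk { bool dead; };

struct queue {
	struct disk *disk;
	int freeze_depth;
	bool freeze_disk_dead;	/* plays the role of q->mq_freeze_disk_dead */
};

/* Stand-ins for the lockdep annotations. */
static void lockdep_acquire(const char *map) { printf("acquire %s\n", map); }
static void lockdep_release(const char *map) { printf("release %s\n", map); }

static bool disk_dead_now(const struct queue *q)
{
	return !q->disk || q->disk->dead;
}

static void freeze(struct queue *q)
{
	if (q->freeze_depth++ == 0) {
		/* Outermost freeze: decide once and remember the result. */
		q->freeze_disk_dead = disk_dead_now(q);
		if (!q->freeze_disk_dead)
			lockdep_acquire("io_lockdep_map");
	}
}

static void unfreeze(struct queue *q)
{
	if (--q->freeze_depth == 0) {
		/*
		 * Replay the decision recorded at freeze time; calling
		 * disk_dead_now() again here could disagree with the freeze
		 * side and leave an acquire without a matching release.
		 */
		if (!q->freeze_disk_dead)
			lockdep_release("io_lockdep_map");
	}
}

int main(void)
{
	struct disk d = { .dead = false };
	struct queue q = { .disk = NULL };

	freeze(&q);	/* no disk yet: recorded as dead, no acquire */
	q.disk = &d;	/* add_disk() happens while the queue is frozen */
	unfreeze(&q);	/* no release either: annotations stay balanced */
	return 0;
}

Freezing before add_disk(), as in the syzbot report in [1], is exactly the
case where the two evaluations would differ.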

block/blk-mq.c

@@ -131,6 +131,9 @@ static bool blk_freeze_set_owner(struct request_queue *q,
 	if (!q->mq_freeze_depth) {
 		q->mq_freeze_owner = owner;
 		q->mq_freeze_owner_depth = 1;
+		q->mq_freeze_disk_dead = !q->disk ||
+			test_bit(GD_DEAD, &q->disk->state) ||
+			!blk_queue_registered(q);
 		return true;
 	}
@@ -187,7 +190,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
 void blk_freeze_queue_start(struct request_queue *q)
 {
 	if (__blk_freeze_queue_start(q, current))
-		blk_freeze_acquire_lock(q, false, false);
+		blk_freeze_acquire_lock(q, false);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -235,7 +238,7 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	if (__blk_mq_unfreeze_queue(q, false))
-		blk_unfreeze_release_lock(q, false, false);
+		blk_unfreeze_release_lock(q, false);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

block/blk.h

@@ -720,22 +720,29 @@ void blk_integrity_verify(struct bio *bio);
 void blk_integrity_prepare(struct request *rq);
 void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
 
-static inline void blk_freeze_acquire_lock(struct request_queue *q, bool
-		disk_dead, bool queue_dying)
+#ifdef CONFIG_LOCKDEP
+static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
 {
-	if (!disk_dead)
+	if (!q->mq_freeze_disk_dead)
 		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
 	if (!queue_dying)
 		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
 }
 
-static inline void blk_unfreeze_release_lock(struct request_queue *q, bool
-		disk_dead, bool queue_dying)
+static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
 {
 	if (!queue_dying)
 		rwsem_release(&q->q_lockdep_map, _RET_IP_);
-	if (!disk_dead)
+	if (!q->mq_freeze_disk_dead)
 		rwsem_release(&q->io_lockdep_map, _RET_IP_);
 }
+#else
+static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
+{
+}
+static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
+{
+}
+#endif
 
 #endif /* BLK_INTERNAL_H */

block/elevator.c

@@ -602,14 +602,14 @@ void elevator_init_mq(struct request_queue *q)
 	 * Disk isn't added yet, so verifying queue lock only manually.
 	 */
 	blk_freeze_queue_start_non_owner(q);
-	blk_freeze_acquire_lock(q, true, false);
+	blk_freeze_acquire_lock(q, false);
 	blk_mq_freeze_queue_wait(q);
 	blk_mq_cancel_work_sync(q);
 
 	err = blk_mq_init_sched(q, e);
-	blk_unfreeze_release_lock(q, true, false);
+	blk_unfreeze_release_lock(q, false);
 	blk_mq_unfreeze_queue_non_owner(q);
 
 	if (err) {

block/genhd.c

@@ -692,7 +692,7 @@ void del_gendisk(struct gendisk *disk)
 	start_drain = __blk_mark_disk_dead(disk);
 	queue_dying = blk_queue_dying(q);
 	if (start_drain)
-		blk_freeze_acquire_lock(q, true, queue_dying);
+		blk_freeze_acquire_lock(q, queue_dying);
 	xa_for_each_start(&disk->part_tbl, idx, part, 1)
 		drop_partition(part);
 	mutex_unlock(&disk->open_mutex);
@@ -748,7 +748,7 @@ void del_gendisk(struct gendisk *disk)
 	blk_mq_exit_queue(q);
 
 	if (start_drain)
-		blk_unfreeze_release_lock(q, true, queue_dying);
+		blk_unfreeze_release_lock(q, queue_dying);
 }
 EXPORT_SYMBOL(del_gendisk);

include/linux/blkdev.h

@@ -581,6 +581,8 @@ struct request_queue {
 #ifdef CONFIG_LOCKDEP
 	struct task_struct	*mq_freeze_owner;
 	int			mq_freeze_owner_depth;
+	/* Records disk state in current context, used in unfreeze queue */
+	bool			mq_freeze_disk_dead;
 #endif
 	wait_queue_head_t	mq_freeze_wq;
 	/*