block: track queue dying state automatically for modeling queue freeze lockdep

Now we only verify the outermost freeze & unfreeze in the current context in
case that !q->mq_freeze_depth, so it is reliable to save the queue dying state
when we want to lock the freeze queue since the state is a per-task variable
now.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241127135133.3952153-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Ming Lei 2024-11-27 21:51:30 +08:00 committed by Jens Axboe
parent a97e3f357d
commit 62c5f7dfce
4 changed files with 17 additions and 13 deletions

View File

@@ -134,6 +134,7 @@ static bool blk_freeze_set_owner(struct request_queue *q,
q->mq_freeze_disk_dead = !q->disk ||
test_bit(GD_DEAD, &q->disk->state) ||
!blk_queue_registered(q);
q->mq_freeze_queue_dying = blk_queue_dying(q);
return true;
}
@@ -190,7 +191,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
void blk_freeze_queue_start(struct request_queue *q)
{
if (__blk_freeze_queue_start(q, current))
blk_freeze_acquire_lock(q, false);
blk_freeze_acquire_lock(q);
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -238,7 +239,7 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
void blk_mq_unfreeze_queue(struct request_queue *q)
{
if (__blk_mq_unfreeze_queue(q, false))
blk_unfreeze_release_lock(q, false);
blk_unfreeze_release_lock(q);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

View File

@@ -721,26 +721,26 @@ void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
if (!q->mq_freeze_disk_dead)
rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
if (!queue_dying)
if (!q->mq_freeze_queue_dying)
rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}
static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
if (!queue_dying)
if (!q->mq_freeze_queue_dying)
rwsem_release(&q->q_lockdep_map, _RET_IP_);
if (!q->mq_freeze_disk_dead)
rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q, bool queue_dying)
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q, bool queue_dying)
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

View File

@@ -661,7 +661,7 @@ void del_gendisk(struct gendisk *disk)
struct request_queue *q = disk->queue;
struct block_device *part;
unsigned long idx;
bool start_drain, queue_dying;
bool start_drain;
might_sleep();
@@ -690,9 +690,8 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
start_drain = __blk_mark_disk_dead(disk);
queue_dying = blk_queue_dying(q);
if (start_drain)
blk_freeze_acquire_lock(q, queue_dying);
blk_freeze_acquire_lock(q);
xa_for_each_start(&disk->part_tbl, idx, part, 1)
drop_partition(part);
mutex_unlock(&disk->open_mutex);
@@ -748,7 +747,7 @@ void del_gendisk(struct gendisk *disk)
blk_mq_exit_queue(q);
if (start_drain)
blk_unfreeze_release_lock(q, queue_dying);
blk_unfreeze_release_lock(q);
}
EXPORT_SYMBOL(del_gendisk);

View File

@@ -581,8 +581,12 @@ struct request_queue {
#ifdef CONFIG_LOCKDEP
struct task_struct *mq_freeze_owner;
int mq_freeze_owner_depth;
/* Records disk state in current context, used in unfreeze queue */
/*
* Records disk & queue state in current context, used in unfreeze
* queue
*/
bool mq_freeze_disk_dead;
bool mq_freeze_queue_dying;
#endif
wait_queue_head_t mq_freeze_wq;
/*