block: Revert "block: Fix potential deadlock while freezing queue and acquiring sysfs_lock"

This reverts commit be26ba9642.

Commit be26ba9642 ("block: Fix potential deadlock while freezing queue and
acquiring sysfs_lock") effectively reverts commit 22465bbac5 ("blk-mq: move
cpuhp callback registering out of q->sysfs_lock"), and so reintroduces the
original resctrl lockdep warning.

So revert it; the underlying issue needs to be fixed another way.
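
For context, the deadlock class both commit titles point at is a plain ABBA lock-order inversion: one path freezes the queue and then takes q->sysfs_lock, while another takes q->sysfs_lock and then freezes the queue. Below is a minimal userspace sketch of that inversion. It is an analogy only: queue freezing in the kernel is a refcount-based mechanism, not a mutex, and store_path()/switch_path() are hypothetical stand-ins for the kernel paths involved, not actual kernel code.

/* ABBA deadlock sketch (userspace analogy, not kernel code).
 * Build with: cc -pthread abba.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for q->sysfs_lock */
static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for queue freezing */

/* Mimics a sysfs store path that freezes the queue, then takes sysfs_lock. */
static void *store_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&freeze_lock);
	pthread_mutex_lock(&sysfs_lock);
	pthread_mutex_unlock(&sysfs_lock);
	pthread_mutex_unlock(&freeze_lock);
	return NULL;
}

/* Mimics a path (e.g. an elevator switch) that takes sysfs_lock, then freezes. */
static void *switch_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sysfs_lock);
	pthread_mutex_lock(&freeze_lock);
	pthread_mutex_unlock(&freeze_lock);
	pthread_mutex_unlock(&sysfs_lock);
	return NULL;
}

int main(void)
{
	/* Run both orderings concurrently. With enough iterations, each
	 * thread can grab its first lock and block forever on the second
	 * one -- the ABBA pattern that lockdep flags in the kernel. */
	for (int i = 0; i < 100000; i++) {
		pthread_t a, b;
		pthread_create(&a, NULL, store_path, NULL);
		pthread_create(&b, NULL, switch_path, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
	}
	puts("survived this run; the hang is timing-dependent");
	return 0;
}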

Cc: Nilay Shroff <nilay@linux.ibm.com>
Fixes: be26ba9642 ("block: Fix potential deadlock while freezing queue and acquiring sysfs_lock")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241218101617.3275704-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Ming Lei <ming.lei@redhat.com>, 2024-12-18 18:16:14 +08:00
Committer: Jens Axboe <axboe@kernel.dk>
parent 51588b1b77
commit 224749be6c
3 changed files with 23 additions and 26 deletions

--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c

@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
 
-	lockdep_assert_held(&q->sysfs_dir_lock);
-
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
-		return;
+		goto unlock;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
+
+unlock:
+	mutex_unlock(&q->sysfs_dir_lock);
 }
 
 int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
 	unsigned long i;
 	int ret = 0;
 
-	lockdep_assert_held(&q->sysfs_dir_lock);
-
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
-		return ret;
+		goto unlock;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
@@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
 			break;
 	}
 
+unlock:
+	mutex_unlock(&q->sysfs_dir_lock);
+
 	return ret;
 }

--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -4453,8 +4453,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	lockdep_assert_held(&q->sysfs_lock);
-
+	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4487,6 +4486,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		xa_for_each_start(&q->hctx_table, j, hctx, j)
 			blk_mq_exit_hctx(q, set, hctx, j);
 	}
+	mutex_unlock(&q->sysfs_lock);
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,14 +4518,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
-	mutex_lock(&q->sysfs_lock);
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
-	mutex_unlock(&q->sysfs_lock);
-
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4544,7 +4540,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
-	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
@@ -4925,12 +4920,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	lockdep_assert_held(&q->sysfs_lock);
+	mutex_lock(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto out;
+		goto unlock;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4940,7 +4935,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-out:
+
+unlock:
+	mutex_unlock(&q->sysfs_lock);
 	return true;
 }
 
@@ -4969,9 +4966,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);
 
+	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
+	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4991,11 +4990,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		mutex_lock(&q->sysfs_dir_lock);
-		mutex_lock(&q->sysfs_lock);
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_freeze_queue(q);
-	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5051,11 +5047,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
-		mutex_unlock(&q->sysfs_lock);
-		mutex_unlock(&q->sysfs_dir_lock);
-	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c

@@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (entry->load_module)
 		entry->load_module(disk, page, length);
 
+	mutex_lock(&q->sysfs_lock);
 	blk_mq_freeze_queue(q);
-	mutex_lock(&q->sysfs_lock);
 	res = entry->store(disk, page, length);
-	blk_mq_unfreeze_queue(q);
 	mutex_unlock(&q->sysfs_lock);
+	blk_mq_unfreeze_queue(q);
 	return res;
 }