block: freeze the queue in queue_attr_store

queue_attr_store updates attributes used to control generating I/O, and
can cause malformed bios if changed with I/O in flight.  Freeze the queue
in common code instead of adding it to almost every attribute.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20240617060532.127975-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit af28141498 (parent 6b377787a3)
Author:    Christoph Hellwig, 2024-06-17 08:04:38 +02:00
Committer: Jens Axboe
2 files changed, 5 insertions(+), 9 deletions(-)
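For orientation, a simplified sketch of the pattern this patch establishes (illustrative only, not the literal kernel code; queue_attr_store_pattern is a made-up name standing in for queue_attr_store): the common sysfs store dispatcher takes the queue freeze once, so the individual ->store methods no longer have to.

static ssize_t queue_attr_store_pattern(struct request_queue *q,
		struct queue_sysfs_entry *entry, const char *page, size_t length)
{
	ssize_t res;

	/* no I/O may be in flight while an attribute that shapes I/O changes */
	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}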

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4631,13 +4631,15 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	int ret;
 	unsigned long i;
 
+	if (WARN_ON_ONCE(!q->mq_freeze_depth))
+		return -EINVAL;
+
 	if (!set)
 		return -EINVAL;
 
 	if (q->nr_requests == nr)
 		return 0;
 
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
 	ret = 0;
@@ -4671,7 +4673,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	}
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
 	return ret;
 }
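With the freeze moved into queue_attr_store, blk_mq_update_nr_requests now expects to be entered with the queue already frozen, which the new WARN_ON_ONCE(!q->mq_freeze_depth) check enforces. A minimal sketch of what any other caller would have to do; example_set_nr_requests is a hypothetical helper, not part of the patch:

static int example_set_nr_requests(struct request_queue *q, unsigned int nr)
{
	int ret;

	/* blk_mq_update_nr_requests() now requires a frozen queue */
	blk_mq_freeze_queue(q);
	ret = blk_mq_update_nr_requests(q, nr);
	blk_mq_unfreeze_queue(q);
	return ret;
}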

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -189,12 +189,9 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
-
 	if (err)
 		return err;
 	return ret;
@@ -241,11 +238,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_sectors = max_sectors_kb << 1;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
 	if (err)
 		return err;
 	return ret;
@@ -585,13 +580,11 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	 * ends up either enabling or disabling wbt completely. We can't
 	 * have IO inflight if that happens.
 	 */
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
 	wbt_set_min_lat(q, val);
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
 	return count;
 }
@@ -722,9 +715,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (!entry->store)
 		return -EIO;
 
+	blk_mq_freeze_queue(q);
 	mutex_lock(&q->sysfs_lock);
 	res = entry->store(q, page, length);
 	mutex_unlock(&q->sysfs_lock);
+	blk_mq_unfreeze_queue(q);
 	return res;
 }
 
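Net effect: every sysfs attribute write now runs with the queue frozen by the common queue_attr_store path, so handlers such as queue_max_sectors_store no longer carry their own freeze/unfreeze pairs, and attributes that previously lacked one are covered as well.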