dm: always manage discard support in terms of max_hw_discard_sectors
Commit 4f563a6473 ("block: add a max_user_discard_sectors queue limit")
changed block core to set max_discard_sectors to:

  min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors)

Since commit 1c0e720228 ("dm: use queue_limits_set") it was reported that
dm-thinp was failing in a few fstests (generic/347 and generic/405), with
the first WARN_ON_ONCE in dm_cell_key_has_valid_range() being reported,
e.g.:

  WARNING: CPU: 1 PID: 30 at drivers/md/dm-bio-prison-v1.c:128 dm_cell_key_has_valid_range+0x3d/0x50

blk_set_stacking_limits() sets max_user_discard_sectors to UINT_MAX, so
given how block core now sets max_discard_sectors (detailed above) it
follows that blk_stack_limits() stacks up the underlying device's
max_hw_discard_sectors and max_discard_sectors is set to match it. If
max_hw_discard_sectors exceeds dm's BIO_PRISON_MAX_RANGE, then
dm_cell_key_has_valid_range() will trigger the warning with:

  WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE)

Aside from this warning, the discard will fail. Fix this and other DM
issues by governing discard support in terms of max_hw_discard_sectors
instead of max_discard_sectors.

Reported-by: Theodore Ts'o <tytso@mit.edu>
Fixes: 1c0e720228 ("dm: use queue_limits_set")
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 69381cf88a
commit 825d8bbd2f
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3394,8 +3394,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 
 	if (!cache->features.discard_passdown) {
 		/* No passdown is done so setting own virtual limits */
-		limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
-						    cache->origin_sectors);
+		limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+						       cache->origin_sectors);
 		limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 		return;
 	}
@@ -3404,7 +3404,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	 * cache_iterate_devices() is stacking both origin and fast device limits
 	 * but discards aren't passed to fast device, so inherit origin's limits.
 	 */
-	limits->max_discard_sectors = origin_limits->max_discard_sectors;
 	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
 	limits->discard_granularity = origin_limits->discard_granularity;
 	limits->discard_alignment = origin_limits->discard_alignment;
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2050,7 +2050,8 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
 	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
 		/* No passdown is done so we set our own virtual limits */
 		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
-		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
+		limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
+							    clone->region_size);
 		return;
 	}
 
@@ -2059,7 +2060,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
 	 * device limits but discards aren't passed to the source device, so
 	 * inherit destination's limits.
 	 */
-	limits->max_discard_sectors = dest_limits->max_discard_sectors;
 	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
 	limits->discard_granularity = dest_limits->discard_granularity;
 	limits->discard_alignment = dest_limits->discard_alignment;
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -871,7 +871,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	if (!bdev_max_discard_sectors(lc->dev->bdev)) {
 		lc->device_supports_discard = false;
 		limits->discard_granularity = lc->sectorsize;
-		limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+		limits->max_hw_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
 	}
 	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
 	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2410,7 +2410,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 		/* All discards are split on chunk_size boundary */
 		limits->discard_granularity = snap->store->chunk_size;
-		limits->max_discard_sectors = snap->store->chunk_size;
+		limits->max_hw_discard_sectors = snap->store->chunk_size;
 
 		up_read(&_origins_lock);
 	}
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -249,7 +249,6 @@ static int io_err_iterate_devices(struct dm_target *ti,
 
 static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
-	limits->max_discard_sectors = UINT_MAX;
 	limits->max_hw_discard_sectors = UINT_MAX;
 	limits->discard_granularity = 512;
 }
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4100,7 +4100,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	if (pt->adjusted_pf.discard_enabled) {
 		disable_discard_passdown_if_not_supported(pt);
 		if (!pt->adjusted_pf.discard_passdown)
-			limits->max_discard_sectors = 0;
+			limits->max_hw_discard_sectors = 0;
 		/*
 		 * The pool uses the same discard limits as the underlying data
 		 * device. DM core has already set this up.
@@ -4497,7 +4497,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	if (pool->pf.discard_enabled) {
 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-		limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+		limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
 	}
 }
 
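The thin_io_hints() cap above is what keeps the bio prison check
satisfied: bounding max_hw_discard_sectors to sectors_per_block *
BIO_PRISON_MAX_RANGE means the stacked discard limit can never describe
a range wider than the prison supports. A small sketch of that
invariant, again with assumed example values:

/* Sketch of the invariant; BIO_PRISON_MAX_RANGE and block size assumed. */
#include <assert.h>
#include <stdint.h>

#define BIO_PRISON_MAX_RANGE 1024u	/* assumed value, in pool blocks */

int main(void)
{
	uint64_t sectors_per_block = 128;	/* assumed pool block size */
	uint64_t max_hw_discard_sectors =
		sectors_per_block * BIO_PRISON_MAX_RANGE;

	/* Worst case: a block-aligned discard of the full limit. */
	uint64_t blocks_spanned = max_hw_discard_sectors / sectors_per_block;

	/* dm_cell_key_has_valid_range()'s check can no longer fire. */
	assert(blocks_spanned <= BIO_PRISON_MAX_RANGE);
	return 0;
}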
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -61,7 +61,6 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 
 static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
-	limits->max_discard_sectors = UINT_MAX;
 	limits->max_hw_discard_sectors = UINT_MAX;
 	limits->discard_granularity = 512;
 }
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1001,7 +1001,6 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	limits->discard_alignment = 0;
 	limits->discard_granularity = DMZ_BLOCK_SIZE;
-	limits->max_discard_sectors = chunk_sectors;
 	limits->max_hw_discard_sectors = chunk_sectors;
 	limits->max_write_zeroes_sectors = chunk_sectors;
 
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1086,7 +1086,7 @@ void disable_discard(struct mapped_device *md)
 	struct queue_limits *limits = dm_get_queue_limits(md);
 
 	/* device doesn't really support DISCARD, disable it */
-	limits->max_discard_sectors = 0;
+	limits->max_hw_discard_sectors = 0;
 }
 
 void disable_write_zeroes(struct mapped_device *md)