mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
block: Support atomic writes limits for stacked devices
Allow stacked devices to support atomic writes by aggregating the minimum
capability of all bottom devices. Flag BLK_FEAT_ATOMIC_WRITES_STACKED is
set for stacked devices which have been enabled to support atomic writes.

Some things to note on the implementation:
- For simplicity, all bottom devices must have the same atomic write
  boundary value (if any).
- The atomic write boundary must already be a power-of-2, but this
  restriction could be relaxed. Furthermore, it is now required that the
  chunk sectors for a top device must be aligned with this boundary.
- If a bottom device's atomic write unit min/max are not aligned with the
  top device chunk sectors, the top device atomic write unit min/max are
  reduced to a value which works for the chunk sectors.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20241118105018.1870052-3-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d00eea91de
commit d7f36dc446
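As a rough illustration of the "minimum capability" aggregation described
in the commit message (a userspace sketch, not part of the patch; the
struct and field names are borrowed from struct queue_limits purely for
readability):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Modelled subset of struct queue_limits (illustrative only). */
struct limits {
	unsigned int atomic_write_hw_unit_min;
	unsigned int atomic_write_hw_unit_max;
	unsigned int atomic_write_hw_max;
};

int main(void)
{
	/* Two hypothetical bottom devices with different capabilities. */
	struct limits b0 = { 4096, 65536, 65536 };
	struct limits b1 = { 8192, 32768, 32768 };
	struct limits t = b0;	/* limits after stacking the first device */

	/* Fold in the second device the way the patch's tail helper does:
	 * the top device keeps only what both bottom devices can honour. */
	t.atomic_write_hw_max = MIN(t.atomic_write_hw_max, b1.atomic_write_hw_max);
	t.atomic_write_hw_unit_min = MAX(t.atomic_write_hw_unit_min, b1.atomic_write_hw_unit_min);
	t.atomic_write_hw_unit_max = MIN(t.atomic_write_hw_unit_max, b1.atomic_write_hw_unit_max);

	/* Prints: 8192 32768 32768 */
	printf("%u %u %u\n", t.atomic_write_hw_unit_min,
	       t.atomic_write_hw_unit_max, t.atomic_write_hw_max);
	return 0;
}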
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -501,6 +501,119 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
 	return sectors;
 }
 
+/* Check if second and later bottom devices are compliant */
+static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/* We're not going to support different boundary sizes.. yet */
+	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
+		return false;
+
+	/* Can't support this */
+	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
+		return false;
+
+	/* Or this */
+	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
+		return false;
+
+	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
+				b->atomic_write_hw_max);
+	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
+				b->atomic_write_hw_unit_min);
+	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
+				b->atomic_write_hw_unit_max);
+	return true;
+}
+
+/* Check for valid boundary of first bottom device */
+static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/*
+	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
+	 * devices store chunk sectors in t->io_min.
+	 */
+	if (b->atomic_write_hw_boundary > t->io_min &&
+	    b->atomic_write_hw_boundary % t->io_min)
+		return false;
+	if (t->io_min > b->atomic_write_hw_boundary &&
+	    t->io_min % b->atomic_write_hw_boundary)
+		return false;
+
+	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
+	return true;
+}
+
+
+/* Check stacking of first bottom device */
+static bool blk_stack_atomic_writes_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	if (b->atomic_write_hw_boundary &&
+	    !blk_stack_atomic_writes_boundary_head(t, b))
+		return false;
+
+	if (t->io_min <= SECTOR_SIZE) {
+		/* No chunk sectors, so use bottom device values directly */
+		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
+		t->atomic_write_hw_max = b->atomic_write_hw_max;
+		return true;
+	}
+
+	/*
+	 * Find values for limits which work for chunk size.
+	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
+	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
+	 * So we need to find highest power-of-2 which works for the chunk
+	 * size.
+	 * As an example scenario, we could have b->unit_max = 16K and
+	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
+	 * aligned with both limits, i.e. 8K in this example.
+	 */
+	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+	while (t->io_min % t->atomic_write_hw_unit_max)
+		t->atomic_write_hw_unit_max /= 2;
+
+	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
+					  t->atomic_write_hw_unit_max);
+	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
+
+	return true;
+}
+
+static void blk_stack_atomic_writes_limits(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
+		goto unsupported;
+
+	if (!b->atomic_write_unit_min)
+		goto unsupported;
+
+	/*
+	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
+	 * device, so check for compliance.
+	 */
+	if (t->atomic_write_hw_max) {
+		if (!blk_stack_atomic_writes_tail(t, b))
+			goto unsupported;
+		return;
+	}
+
+	if (!blk_stack_atomic_writes_head(t, b))
+		goto unsupported;
+	return;
+
+unsupported:
+	t->atomic_write_hw_max = 0;
+	t->atomic_write_hw_unit_max = 0;
+	t->atomic_write_hw_unit_min = 0;
+	t->atomic_write_hw_boundary = 0;
+	t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
+}
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t: the stacking driver limits (top device)
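The 16K/24K scenario from the comment in blk_stack_atomic_writes_head()
above can be checked with a few lines of userspace C (a sketch under the
same assumptions, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int io_min = 24 * 1024;	/* top device chunk size */
	unsigned int unit_max = 16 * 1024;	/* bottom device unit_max */

	/* Same loop as the patch: halve unit_max (a power-of-2) until it
	 * divides the chunk size evenly. */
	while (io_min % unit_max)
		unit_max /= 2;

	printf("%uK\n", unit_max / 1024);	/* prints 8K */
	return 0;
}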
@@ -661,6 +774,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		t->zone_write_granularity = 0;
 		t->max_zone_append_sectors = 0;
 	}
+	blk_stack_atomic_writes_limits(t, b);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
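For context, blk_stack_limits() runs once per bottom device, so the first
call takes the blk_stack_atomic_writes_head() path (t->atomic_write_hw_max
is still zero) and every later call takes the blk_stack_atomic_writes_tail()
path. A hypothetical driver-side call sequence (b0_lim/b1_lim are assumed
names for the bottom devices' queue_limits, not from this patch):

	struct queue_limits t;

	blk_set_stacking_limits(&t);
	t.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;

	/* First bottom device: head path seeds the atomic write limits. */
	blk_stack_limits(&t, &b0_lim, 0);

	/* Second bottom device: tail path validates and aggregates. */
	blk_stack_limits(&t, &b1_lim, 0);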
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -333,6 +333,10 @@ typedef unsigned int __bitwise blk_features_t;
 #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
 	((__force blk_features_t)(1u << 15))
 
+/* stacked device can/does support atomic writes */
+#define BLK_FEAT_ATOMIC_WRITES_STACKED \
+	((__force blk_features_t)(1u << 16))
+
 /*
  * Flags automatically inherited when stacking limits.
  */
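Note that the new flag sits outside the automatically inherited set that
follows it, so atomic write stacking stays opt-in: a stacking driver has
to set the bit explicitly before stacking limits (sketch, assuming the
usual stacking-limits setup):

	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;

Without it, blk_stack_atomic_writes_limits() takes the unsupported path
and clears every atomic write limit on the top device.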