mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
block: Allow zero value of max_zone_append_sectors queue limit
block: Allow zero value of max_zone_append_sectors queue limit

In preparation for adding a generic zone append emulation using zone write plugging, allow device drivers supporting zoned block devices to set the max_zone_append_sectors queue limit of a device to 0 to indicate the lack of native support for zone append operations and that the block layer should emulate these operations using regular write operations.

blk_queue_max_zone_append_sectors() is modified to allow passing 0 as the max_zone_append_sectors argument. The function queue_max_zone_append_sectors() is also modified to ensure that the minimum of the max_hw_sectors and chunk_sectors limits is used whenever the max_zone_append_sectors limit is 0. This minimum is consistent with the value set for the max_zone_append_sectors limit by the function blk_validate_zoned_limits() when limits for a queue are validated.

The helper functions queue_emulates_zone_append() and bdev_emulates_zone_append() are added to test if a queue (or block device) emulates zone append operations.

In order for blk_revalidate_disk_zones() to accept zoned block devices relying on zone append emulation, the direct check against the max_zone_append_sectors queue limit of the disk is replaced by a check using the value returned by queue_max_zone_append_sectors(). Similarly, queue_zone_append_max_show() is modified to use the same accessor so that the sysfs attribute advertises the non-zero limit that will be used, regardless of whether it is used for native or emulated commands.

For stacking drivers, a top device should not need to care if the underlying devices have native or emulated zone append operations. blk_stack_limits() is thus modified to set the top device max_zone_append_sectors limit using the new accessor queue_limits_max_zone_append_sectors(). queue_max_zone_append_sectors() is modified to use this function as well. Stacking drivers that require zone append emulation, e.g. dm-crypt, can still request this feature by calling blk_queue_max_zone_append_sectors() with a 0 limit.

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
Tested-by: Dennis Maisenbacher <dennis.maisenbacher@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20240408014128.205141-10-dlemoal@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 843283e96e
commit ccdbf0aad2
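
Before the diff, a minimal sketch of the driver-facing convention this patch introduces. The function name below is hypothetical and not part of the patch; only blk_queue_max_zone_append_sectors() and the meaning of a 0 argument come from the commit itself.

#include <linux/blkdev.h>

/* Hypothetical init path for a zoned driver without native zone append. */
static void my_zoned_driver_set_limits(struct request_queue *q)
{
	/*
	 * Passing 0 now means "no native REQ_OP_ZONE_APPEND support": the
	 * block layer emulates zone append using regular writes, and the
	 * effective limit reported by queue_max_zone_append_sectors()
	 * falls back to min(chunk_sectors, max_hw_sectors).
	 */
	blk_queue_max_zone_append_sectors(q, 0);
}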
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -602,7 +602,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 		return BLK_STS_IOERR;
 
 	/* Make sure the BIO is small enough and will not get split */
-	if (nr_sectors > q->limits.max_zone_append_sectors)
+	if (nr_sectors > queue_max_zone_append_sectors(q))
 		return BLK_STS_IOERR;
 
 	bio->bi_opf |= REQ_NOMERGE;
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -412,24 +412,32 @@ EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
  * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
  * @q: the request queue for the device
  * @max_zone_append_sectors: maximum number of sectors to write per command
+ *
+ * Sets the maximum number of sectors allowed for zone append commands.
+ * Specifying 0 for @max_zone_append_sectors indicates that the queue does
+ * not natively support zone append operations and that the block layer must
+ * emulate these operations using regular writes.
  **/
 void blk_queue_max_zone_append_sectors(struct request_queue *q,
 		unsigned int max_zone_append_sectors)
 {
-	unsigned int max_sectors;
+	unsigned int max_sectors = 0;
 
 	if (WARN_ON(!blk_queue_is_zoned(q)))
 		return;
 
-	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
-	max_sectors = min(q->limits.chunk_sectors, max_sectors);
+	if (max_zone_append_sectors) {
+		max_sectors = min(q->limits.max_hw_sectors,
+				  max_zone_append_sectors);
+		max_sectors = min(q->limits.chunk_sectors, max_sectors);
 
-	/*
-	 * Signal eventual driver bugs resulting in the max_zone_append sectors limit
-	 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
-	 * or the max_hw_sectors limit not set.
-	 */
-	WARN_ON(!max_sectors);
+		/*
+		 * Signal eventual driver bugs resulting in the max_zone_append
+		 * sectors limit being 0 due to the chunk_sectors limit (zone
+		 * size) not set or the max_hw_sectors limit not set.
+		 */
+		WARN_ON_ONCE(!max_sectors);
+	}
 
 	q->limits.max_zone_append_sectors = max_sectors;
 }
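
For the native case, the clamping done by the rewritten setter can be illustrated with made-up numbers; the caller below and all of its limit values are hypothetical, not from the patch:

/* Hypothetical driver whose hardware accepts 2048-sector zone appends. */
static void my_driver_report_native_limit(struct request_queue *q)
{
	/*
	 * Assuming max_hw_sectors = 1024 and chunk_sectors = 524288 are
	 * already set on @q, the setter clamps the argument twice:
	 *   min(1024, 2048)   = 1024  (at most one hardware command)
	 *   min(524288, 1024) = 1024  (at most one zone)
	 * so q->limits.max_zone_append_sectors becomes 1024.
	 */
	blk_queue_max_zone_append_sectors(q, 2048);
}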
@@ -756,8 +764,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
 					b->max_write_zeroes_sectors);
-	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
-					b->max_zone_append_sectors);
+	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
+					 queue_limits_max_zone_append_sectors(b));
 	t->bounce = max(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
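
A worked example of the new stacking behavior, using illustrative limits only (none of these numbers come from the patch):

/*
 * top (t):    max_zone_append_sectors = 0 (emulated),
 *             chunk_sectors = 524288, max_hw_sectors = 1024
 * bottom (b): max_zone_append_sectors = 512 (native),
 *             chunk_sectors = 524288, max_hw_sectors = 1024
 *
 * queue_limits_max_zone_append_sectors(t) = min_not_zero(0, 1024)   = 1024
 * queue_limits_max_zone_append_sectors(b) = min_not_zero(512, 1024) = 512
 * t->max_zone_append_sectors              = min(1024, 512)          = 512
 *
 * The top device gets a usable non-zero limit without knowing whether
 * the bottom device's zone append support is native or emulated.
 */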
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -224,7 +224,7 @@ static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
 
 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
 {
-	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
+	unsigned long long max_sectors = queue_max_zone_append_sectors(q);
 
 	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
 }
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1692,7 +1692,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 		return -ENODEV;
 	}
 
-	if (!q->limits.max_zone_append_sectors) {
+	if (!queue_max_zone_append_sectors(q)) {
 		pr_warn("%s: Invalid 0 maximum zone append limit\n",
 			disk->disk_name);
 		return -ENODEV;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1173,12 +1173,29 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
 	return q->limits.max_segment_size;
 }
 
-static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
 {
-	const struct queue_limits *l = &q->limits;
+	unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
 
-	return min(l->max_zone_append_sectors, l->max_sectors);
+	return min_not_zero(l->max_zone_append_sectors, max_sectors);
+}
+
+static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
+{
+	if (!blk_queue_is_zoned(q))
+		return 0;
+
+	return queue_limits_max_zone_append_sectors(&q->limits);
 }
 
+static inline bool queue_emulates_zone_append(struct request_queue *q)
+{
+	return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+}
+
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
+{
+	return queue_emulates_zone_append(bdev_get_queue(bdev));
+}
+
 static inline unsigned int
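
Finally, a hedged sketch of a consumer of the new helpers; the function below is hypothetical, while the accessors it calls are the ones added or reworked by this patch:

#include <linux/blkdev.h>

/* Hypothetical caller checking how zone appends will be serviced. */
static void my_report_zone_append_mode(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_emulates_zone_append(bdev))
		/* Zoned device with a stored limit of 0: block layer emulates. */
		pr_info("zone append emulated, effective limit %u sectors\n",
			queue_max_zone_append_sectors(q));
	else
		pr_info("native zone append, limit %u sectors\n",
			queue_max_zone_append_sectors(q));
}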