block: make dma_alignment a stacking queue_limit
Device mappers had always been getting the default 511 dma mask, but
the underlying device might have a larger alignment requirement. Since
this value is used to determine allowable direct-io alignment, this
needs to be a stackable limit.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221110184501.2451620-2-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit c964d62f5c
parent a7a1598189
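
To make the new stacking rule concrete, here is a minimal userspace sketch (not kernel code; the struct and helper names below are illustrative stand-ins for the kernel's queue_limits and blk_stack_limits) of how a device-mapper queue at the old 511 default inherits a stricter mask from the device beneath it:

/*
 * Minimal userspace sketch of the stacking rule this patch adds to
 * blk_stack_limits(). The struct and helper are illustrative
 * stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

struct limits_sketch {
	unsigned int dma_alignment;	/* alignment mask, e.g. 511 */
};

/* Mirrors the new line in blk_stack_limits(): keep the stricter mask. */
static void stack_dma_alignment(struct limits_sketch *t,
				const struct limits_sketch *b)
{
	if (b->dma_alignment > t->dma_alignment)
		t->dma_alignment = b->dma_alignment;
}

int main(void)
{
	struct limits_sketch dm = { .dma_alignment = 511 };	/* old default */
	struct limits_sketch lower = { .dma_alignment = 4095 };	/* e.g. a 4k-aligned device */

	stack_dma_alignment(&dm, &lower);
	printf("stacked mask: %u\n", dm.dma_alignment);	/* prints 4095 */
	return 0;
}

The diff below adds exactly this max() behavior to blk_stack_limits() and moves the authoritative field from struct request_queue into struct queue_limits so it participates in stacking.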
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 			PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_stats;
 
-	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -57,6 +57,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
 	lim->zone_write_granularity = 0;
+	lim->dma_alignment = 511;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -600,6 +601,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
 	/* Set non-power-of-2 compatible chunk_sectors boundary */
 	if (b->chunk_sectors)
@@ -773,7 +775,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
  **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-	q->dma_alignment = mask;
+	q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
@@ -795,8 +797,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
 	BUG_ON(mask > PAGE_SIZE);
 
-	if (mask > q->dma_alignment)
-		q->dma_alignment = mask;
+	if (mask > q->limits.dma_alignment)
+		q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
 	unsigned char		discard_misaligned;
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
+
+	/*
+	 * Drivers that set dma_alignment to less than 511 must be prepared to
+	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+	 * due to possible offsets.
+	 */
+	unsigned int		dma_alignment;
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
 	unsigned long		nr_requests;	/* Max # of requests */
 
 	unsigned int		dma_pad_mask;
-	/*
-	 * Drivers that set dma_alignment to less than 511 must be prepared to
-	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
-	 * due to possible offsets.
-	 */
-	unsigned int		dma_alignment;
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
@@ -1324,7 +1325,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
 
 static inline int queue_dma_alignment(const struct request_queue *q)
 {
-	return q ? q->dma_alignment : 511;
+	return q ? q->limits.dma_alignment : 511;
 }
 
 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
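
For context on where the limit is consumed: queue_dma_alignment() above supplies the mask that direct-io setup checks user buffers against. A minimal sketch of that style of check, assuming the usual address-and-length-versus-mask test (the kernel's actual check lives in the bio/iov_iter mapping code, which this patch does not touch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: direct I/O setup rejects a user buffer whose
 * address or length is not aligned to the queue's dma_alignment mask.
 */
static bool dio_aligned(const void *buf, size_t len, unsigned int mask)
{
	return (((uintptr_t)buf | len) & mask) == 0;
}

int main(void)
{
	_Alignas(4096) static char buf[8192];

	/* A 4k-aligned buffer and length pass a 4095 mask; 512 bytes do not. */
	printf("%d %d\n", dio_aligned(buf, 4096, 4095),
	       dio_aligned(buf, 512, 4095));
	return 0;
}

With the mask stacked through blk_stack_limits(), a dm device on top of a 4k-alignment-requiring device now reports the larger mask here instead of the 511 default.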