block: pass a phys_addr_t to get_max_segment_size

Work on a single address to simplify the logic, and prepare the callers
for using better helpers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20240706075228.2350978-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 09595e0c9d (parent 25f76c3db2)
Author: Christoph Hellwig, 2024-07-06 09:52:18 +02:00
Committer: Jens Axboe

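The new callers below compute the segment's physical address with bvec_phys(), which the parent commit (25f76c3db2) introduces. As a reading aid for the diff, a minimal sketch of that helper, assuming the usual struct bio_vec layout (see the parent commit for the real definition):

/* sketch of bvec_phys(): physical address of the first byte the bvec covers */
static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
{
	return page_to_phys(bvec->bv_page) + bvec->bv_offset;
}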

@@ -209,23 +209,22 @@ static inline unsigned get_max_io_size(struct bio *bio,
 /**
  * get_max_segment_size() - maximum number of bytes to add as a single segment
  * @lim: Request queue limits.
- * @start_page: See below.
- * @offset: Offset from @start_page where to add a segment.
+ * @paddr: address of the range to add
+ * @max_len: maximum length available to add at @paddr
  *
- * Returns the maximum number of bytes that can be added as a single segment.
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
  */
 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
-		struct page *start_page, unsigned long offset)
+		phys_addr_t paddr, unsigned int len)
 {
-	unsigned long mask = lim->seg_boundary_mask;
-
-	offset = mask & (page_to_phys(start_page) + offset);
-
 	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
-	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
+	return min_t(unsigned long, len,
+		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+		    (unsigned long)lim->max_segment_size - 1) + 1);
 }
 
 /**
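A quick sanity check of the new expression with made-up numbers: with a 64K segment boundary (seg_boundary_mask = 0xffff), a 64K max_segment_size, and a range starting 16 bytes below a boundary, only those 16 bytes may go into one segment. A standalone sketch of the same arithmetic (illustrative values, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffff;	/* lim->seg_boundary_mask: 64K */
	unsigned long max_seg = 65536;	/* lim->max_segment_size */
	unsigned long paddr = 0x1fff0;	/* 16 bytes below a 64K boundary */
	unsigned long len = 4096;	/* bytes still available at paddr */

	/* distance to the boundary: 0xffff - 0xfff0 = 15 */
	unsigned long to_boundary = mask - (mask & paddr);
	/* min(15, 65535) + 1 = 16; adding 1 only after taking the minimum
	 * avoids overflow when mask == ULONG_MAX and paddr is aligned */
	unsigned long seg = (to_boundary < max_seg - 1 ? to_boundary
						       : max_seg - 1) + 1;

	printf("%lu\n", len < seg ? len : seg);	/* prints 16 */
	return 0;
}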
@@ -258,9 +257,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(lim, bv->bv_page,
-						bv->bv_offset + total_len);
-		seg_size = min(seg_size, len);
+		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);
 
 		(*nsegs)++;
 		total_len += seg_size;
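Note how this hunk also drops the separate seg_size = min(seg_size, len) clamp: the remaining length is now passed into the helper, which applies it via min_t(). Restated side by side (a sketch of just the changed statements, not the surrounding kernel code):

/* before: find the limit, then clamp to the remaining length by hand */
seg_size = get_max_segment_size(lim, bv->bv_page, bv->bv_offset + total_len);
seg_size = min(seg_size, len);

/* after: one call; the helper clamps to len internally */
seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);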
@@ -494,8 +491,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(&q->limits,
-				bvec->bv_page, offset), nbytes);
+		unsigned len = get_max_segment_size(&q->limits,
+				bvec_phys(bvec) + total, nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*
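For orientation, both callers follow the same chunking pattern: advance a running byte offset through the bvec and let the helper bound each step by the segment boundary, the max segment size, and the bytes left. A minimal sketch of that loop shape (illustrative, not the full blk_bvec_map_sg()):

unsigned total = 0, nbytes = bvec->bv_len;

while (nbytes > 0) {
	/* largest chunk at this physical address that fits one segment */
	unsigned len = get_max_segment_size(&q->limits,
			bvec_phys(bvec) + total, nbytes);

	/* ... map one scatterlist segment of len bytes here ... */
	total += len;
	nbytes -= len;
}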