block: remove zone append special casing from the direct I/O path
This code is unused, and all future zoned file systems should follow
the btrfs lead and split their bios to the zoned limits themselves in
the I/O submission handler. If they did not, they would be broken by
commit ed9832bc08 ("block: introduce folio awareness and add a bigger
size from folio") whenever the zone append limit (which is usually the
max_hw_sectors limit) is smaller than the largest possible folio size.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20241030051859.280923-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
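
As an illustration of the splitting the message refers to, here is a
minimal, hypothetical sketch (function and variable names are invented;
real filesystem code such as btrfs' is considerably more involved and
handles completions and errors) of a zoned filesystem's submission path
issuing a large folio as a series of zone append bios, each capped at
the queue's zone append limit rather than relying on the block layer to
trim an oversized bio:

	/*
	 * Hypothetical sketch only: cap every REQ_OP_ZONE_APPEND bio to the
	 * queue's zone append limit while building it, instead of expecting
	 * the block layer to shrink an oversized bio.  Completion handling
	 * and error propagation are omitted.
	 */
	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/mm.h>

	static void example_zone_append_folio(struct block_device *bdev,
					      sector_t zone_start,
					      struct folio *folio)
	{
		struct request_queue *q = bdev_get_queue(bdev);
		size_t max_bytes = (size_t)queue_max_zone_append_sectors(q)
					<< SECTOR_SHIFT;
		size_t size = folio_size(folio);
		size_t off = 0;

		while (off < size) {
			size_t chunk = min(size - off, max_bytes);
			struct bio *bio;

			bio = bio_alloc(bdev, 1, REQ_OP_ZONE_APPEND, GFP_NOIO);
			bio->bi_iter.bi_sector = zone_start;
			if (!bio_add_folio(bio, folio, chunk, off)) {
				bio_put(bio);
				break;
			}
			submit_bio(bio);
			off += chunk;
		}
	}

The point, per the commit message, is that this capping now has to
happen in the filesystem's own submission path; after this change the
block layer's direct I/O helpers no longer clamp or special-case zone
append bios.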
parent 496a51b371
commit cafd00d0e9
block/bio.c | 34

--- a/block/bio.c
+++ b/block/bio.c
@@ -1206,21 +1206,12 @@ EXPORT_SYMBOL_GPL(__bio_release_pages);
 
 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
-	size_t size = iov_iter_count(iter);
-
 	WARN_ON_ONCE(bio->bi_max_vecs);
 
-	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-		size_t max_sectors = queue_max_zone_append_sectors(q);
-
-		size = min(size, max_sectors << SECTOR_SHIFT);
-	}
-
 	bio->bi_vcnt = iter->nr_segs;
 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
-	bio->bi_iter.bi_size = size;
+	bio->bi_iter.bi_size = iov_iter_count(iter);
 	bio_set_flag(bio, BIO_CLONED);
 }
 
@@ -1245,20 +1236,6 @@ static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
 	return 0;
 }
 
-static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
-					 size_t len, size_t offset)
-{
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	bool same_page = false;
-
-	if (bio_add_hw_folio(q, bio, folio, len, offset,
-			queue_max_zone_append_sectors(q), &same_page) != len)
-		return -EINVAL;
-	if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
-		unpin_user_folio(folio, 1);
-	return 0;
-}
-
 static unsigned int get_contig_folio_len(unsigned int *num_pages,
 					 struct page **pages, unsigned int i,
 					 struct folio *folio, size_t left,
@@ -1365,14 +1342,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		len = get_contig_folio_len(&num_pages, pages, i,
 					   folio, left, offset);
 
-		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-			ret = bio_iov_add_zone_append_folio(bio, folio, len,
-					folio_offset);
-			if (ret)
-				break;
-		} else
-			bio_iov_add_folio(bio, folio, len, folio_offset);
-
+		bio_iov_add_folio(bio, folio, len, folio_offset);
 		offset = 0;
 	}
 