Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
block: Constify most queue limits pointers
Document which functions do not modify the queue limits.

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20221025191755.1711437-3-bvanassche@acm.org
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b179c98f76
commit aa261f2058
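The pattern the diff below applies throughout is plain C const-correctness: functions that only read the queue limits take a const struct queue_limits *, so the compiler now enforces what the code already promised. A minimal self-contained sketch of the idea, assuming a struct queue_limits trimmed to two fields; the example_* helpers are hypothetical, not kernel functions:

#include <stdio.h>

/* Trimmed stand-in for the kernel's struct queue_limits. */
struct queue_limits {
	unsigned int logical_block_size;
	unsigned int max_sectors;
};

/*
 * Read-only user: the const qualifier documents, and the compiler
 * enforces, that this function does not modify the limits. A line such
 * as "lim->max_sectors = 0;" would fail to compile here.
 */
static unsigned int example_max_bytes(const struct queue_limits *lim)
{
	return lim->max_sectors << 9;	/* SECTOR_SHIFT == 9 */
}

/* Writer: keeps the non-const pointer because it mutates the limits. */
static void example_set_max_sectors(struct queue_limits *lim,
				    unsigned int max_sectors)
{
	lim->max_sectors = max_sectors;
}

int main(void)
{
	struct queue_limits lim = { .logical_block_size = 512 };

	example_set_max_sectors(&lim, 255);
	printf("max I/O size: %u bytes\n", example_max_bytes(&lim));
	return 0;
}

Passing a non-const pointer to a const-taking reader is always valid; the implicit conversion only goes one way, which is why only the reader functions can be constified.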
diff --git a/block/blk-map.c b/block/blk-map.c
@@ -555,7 +555,7 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
 	size_t nr_iter = iov_iter_count(iter);
 	size_t nr_segs = iter->nr_segs;
 	struct bio_vec *bvecs, *bvprvp = NULL;
-	struct queue_limits *lim = &q->limits;
+	const struct queue_limits *lim = &q->limits;
 	unsigned int nsegs = 0, bytes = 0;
 	struct bio *bio;
 	size_t i;

diff --git a/block/blk-merge.c b/block/blk-merge.c
@@ -100,13 +100,14 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  * is defined as 'unsigned int', meantime it has to be aligned to with the
  * logical block size, which is the minimum accepted unit by hardware.
  */
-static unsigned int bio_allowed_max_sectors(struct queue_limits *lim)
+static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
 {
 	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 }
 
-static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
-		unsigned *nsegs, struct bio_set *bs)
+static struct bio *bio_split_discard(struct bio *bio,
+		const struct queue_limits *lim,
+		unsigned *nsegs, struct bio_set *bs)
 {
 	unsigned int max_discard_sectors, granularity;
 	sector_t tmp;
@@ -146,7 +147,8 @@ static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
 }
 
 static struct bio *bio_split_write_zeroes(struct bio *bio,
-		struct queue_limits *lim, unsigned *nsegs, struct bio_set *bs)
+		const struct queue_limits *lim,
+		unsigned *nsegs, struct bio_set *bs)
 {
 	*nsegs = 0;
 	if (!lim->max_write_zeroes_sectors)
@@ -165,7 +167,7 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
  * aligned to a physical block boundary.
  */
 static inline unsigned get_max_io_size(struct bio *bio,
-		struct queue_limits *lim)
+		const struct queue_limits *lim)
 {
 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
@@ -184,7 +186,7 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	return max_sectors & ~(lbs - 1);
 }
 
-static inline unsigned get_max_segment_size(struct queue_limits *lim,
+static inline unsigned get_max_segment_size(const struct queue_limits *lim,
 		struct page *start_page, unsigned long offset)
 {
 	unsigned long mask = lim->seg_boundary_mask;
@@ -219,9 +221,9 @@ static inline unsigned get_max_segment_size(struct queue_limits *lim,
  * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
  * the block driver.
  */
-static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
-		unsigned *nsegs, unsigned *bytes, unsigned max_segs,
-		unsigned max_bytes)
+static bool bvec_split_segs(const struct queue_limits *lim,
+		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
+		unsigned max_segs, unsigned max_bytes)
 {
 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 	unsigned len = min(bv->bv_len, max_len);
@@ -267,7 +269,7 @@ static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
-static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
+static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
@@ -331,8 +333,9 @@ static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
-struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
-		       unsigned int *nr_segs)
+struct bio *__bio_split_to_limits(struct bio *bio,
+				  const struct queue_limits *lim,
+				  unsigned int *nr_segs)
 {
 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
 	struct bio *split;
@@ -377,7 +380,7 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
  */
 struct bio *bio_split_to_limits(struct bio *bio)
 {
-	struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
+	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 	unsigned int nr_segs;
 
 	if (bio_may_exceed_limits(bio, lim))

diff --git a/block/blk-settings.c b/block/blk-settings.c
@@ -481,7 +481,7 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
 
-static int queue_limit_alignment_offset(struct queue_limits *lim,
+static int queue_limit_alignment_offset(const struct queue_limits *lim,
 		sector_t sector)
 {
 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
@@ -491,8 +491,8 @@ static int queue_limit_alignment_offset(struct queue_limits *lim,
 	return (granularity + lim->alignment_offset - alignment) % granularity;
 }
 
-static unsigned int queue_limit_discard_alignment(struct queue_limits *lim,
-		sector_t sector)
+static unsigned int queue_limit_discard_alignment(
+		const struct queue_limits *lim, sector_t sector)
 {
 	unsigned int alignment, granularity, offset;
 
diff --git a/block/blk.h b/block/blk.h
@@ -104,7 +104,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
+static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
 		struct bio_vec *bprv, unsigned int offset)
 {
 	return (offset & lim->virt_boundary_mask) ||
@@ -115,7 +115,7 @@ static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
-static inline bool bvec_gap_to_prev(struct queue_limits *lim,
+static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
 		struct bio_vec *bprv, unsigned int offset)
 {
 	if (!lim->virt_boundary_mask)
@@ -297,7 +297,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
 		const char *, size_t);
 
 static inline bool bio_may_exceed_limits(struct bio *bio,
-		struct queue_limits *lim)
+		const struct queue_limits *lim)
 {
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
@@ -320,8 +320,9 @@ static inline bool bio_may_exceed_limits(struct bio *bio,
 	bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
 }
 
-struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
-		       unsigned int *nr_segs);
+struct bio *__bio_split_to_limits(struct bio *bio,
+				  const struct queue_limits *lim,
+				  unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,