block: use bio_for_each_bvec() to map sg
It is more efficient to use bio_for_each_bvec() to map sg; meanwhile, we have to consider splitting the multi-page bvec, as done in blk_bio_segment_split().

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent dcebd75592
commit 862e5a5e6f
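For background: bio_for_each_segment() yields at most one page worth of data per iteration, while bio_for_each_bvec() yields the whole multi-page bvec and leaves any further splitting to the sg-mapping code. A minimal userspace model of the two views of the same bvec, assuming a 4 KiB PAGE_SIZE and hypothetical geometry (this is an illustrative sketch, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Simplified stand-in for struct bio_vec: a byte run inside page memory. */
struct bvec_model {
	unsigned offset;	/* offset into the first page */
	unsigned len;		/* total length, may cross page boundaries */
};

int main(void)
{
	/* Hypothetical multi-page bvec: 12 KiB starting 512 bytes into page 0. */
	struct bvec_model bv = { .offset = 512, .len = 3 * PAGE_SIZE };
	unsigned done = 0;

	/* bio_for_each_bvec()-style view: the whole run is one element. */
	printf("bvec view : 1 element, offset=%u len=%u\n", bv.offset, bv.len);

	/* bio_for_each_segment()-style view: split at every page boundary. */
	while (done < bv.len) {
		unsigned off = (bv.offset + done) % PAGE_SIZE;
		unsigned idx = (bv.offset + done) / PAGE_SIZE;
		unsigned chunk = PAGE_SIZE - off;

		if (chunk > bv.len - done)
			chunk = bv.len - done;
		printf("segment view: page %u offset=%u len=%u\n", idx, off, chunk);
		done += chunk;
	}
	return 0;
}

Run as-is, the bvec view prints one 12288-byte element while the segment view prints four page-bounded chunks; the diff below teaches the sg-mapping path to work from the former.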
@@ -464,6 +464,54 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
+static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+		struct scatterlist *sglist)
+{
+	if (!*sg)
+		return sglist;
+
+	/*
+	 * If the driver previously mapped a shorter list, we could see a
+	 * termination bit prematurely unless it fully inits the sg table
+	 * on each mapping. We KNOW that there must be more entries here
+	 * or the driver would be buggy, so force clear the termination bit
+	 * to avoid doing a full sg_init_table() in drivers for each command.
+	 */
+	sg_unmark_end(*sg);
+	return sg_next(*sg);
+}
+
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+		struct bio_vec *bvec, struct scatterlist *sglist,
+		struct scatterlist **sg)
+{
+	unsigned nbytes = bvec->bv_len;
+	unsigned nsegs = 0, total = 0, offset = 0;
+
+	while (nbytes > 0) {
+		unsigned seg_size;
+		struct page *pg;
+		unsigned idx;
+
+		*sg = blk_next_sg(sg, sglist);
+
+		seg_size = get_max_segment_size(q, bvec->bv_offset + total);
+		seg_size = min(nbytes, seg_size);
+
+		offset = (total + bvec->bv_offset) % PAGE_SIZE;
+		idx = (total + bvec->bv_offset) / PAGE_SIZE;
+		pg = nth_page(bvec->bv_page, idx);
+
+		sg_set_page(*sg, pg, seg_size, offset);
+
+		total += seg_size;
+		nbytes -= seg_size;
+		nsegs++;
+	}
+
+	return nsegs;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
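The loop in blk_bvec_map_sg() above walks one multi-page bvec and emits one scatterlist entry per chunk, capping each chunk at the queue's segment size limit and computing the page index and offset it starts at. A standalone model of just that arithmetic, with an assumed flat 8 KiB limit standing in for get_max_segment_size() and a hypothetical bvec; the queue and scatterlist plumbing are omitted:

#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * Stand-in for get_max_segment_size(): a flat, assumed 8 KiB queue limit.
 * The real helper also honours the queue's segment boundary mask relative
 * to the passed-in offset.
 */
static unsigned max_segment_size(unsigned offset)
{
	(void)offset;
	return 8192;
}

static unsigned min_u(unsigned a, unsigned b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical multi-page bvec: 12 KiB starting 512 bytes into page 0. */
	unsigned bv_offset = 512, bv_len = 3 * PAGE_SIZE;
	unsigned nbytes = bv_len, total = 0, nsegs = 0;

	while (nbytes > 0) {
		unsigned seg_size = min_u(nbytes, max_segment_size(bv_offset + total));
		unsigned offset = (total + bv_offset) % PAGE_SIZE;
		unsigned idx = (total + bv_offset) / PAGE_SIZE;

		printf("sg[%u]: page index %u, offset %u, length %u\n",
		       nsegs, idx, offset, seg_size);
		total += seg_size;
		nbytes -= seg_size;
		nsegs++;
	}
	printf("total sg entries: %u\n", nsegs);
	return 0;
}

With these numbers the 12 KiB bvec maps to two scatterlist entries, the first of which spans a page boundary — something the old one-page-per-segment path could never produce.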
@@ -481,25 +529,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
-		if (!*sg)
-			*sg = sglist;
-		else {
-			/*
-			 * If the driver previously mapped a shorter
-			 * list, we could see a termination bit
-			 * prematurely unless it fully inits the sg
-			 * table on each mapping. We KNOW that there
-			 * must be more entries here or the driver
-			 * would be buggy, so force clear the
-			 * termination bit to avoid doing a full
-			 * sg_init_table() in drivers for each command.
-			 */
-			sg_unmark_end(*sg);
-			*sg = sg_next(*sg);
-		}
-
-		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-		(*nsegs)++;
+		(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 	*bvprv = *bvec;
 }
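The branch removed in this hunk open-coded the "clear a stale termination bit before reusing the next entry" step that blk_next_sg() now centralizes. A toy model of the reuse pattern that motivates it; the struct and flag are simplified stand-ins, not the kernel's scatterlist layout:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct scatterlist: just a length and an end flag. */
struct sg_model {
	unsigned len;
	int is_end;
};

/* Map 'nsegs' entries into a table that may still carry an old end marker. */
static void map_request(struct sg_model *table, unsigned nsegs)
{
	for (unsigned i = 0; i < nsegs; i++) {
		table[i].is_end = 0;	/* like sg_unmark_end(): clear stale marker */
		table[i].len = 4096;	/* pretend payload */
	}
	table[nsegs - 1].is_end = 1;	/* like sg_mark_end() on the last entry */
}

int main(void)
{
	struct sg_model table[4];

	memset(table, 0, sizeof(table));

	map_request(table, 2);	/* first request: 2 entries, end marker at [1] */
	map_request(table, 4);	/* longer request reuses the same table */

	/* A consumer walks until it sees the end marker. */
	for (unsigned i = 0; i < 4; i++) {
		printf("sg[%u] len=%u%s\n", i, table[i].len,
		       table[i].is_end ? " <end>" : "");
		if (table[i].is_end)
			break;
	}
	return 0;
}

If map_request() skipped the clearing step, the marker left behind by the shorter first request would truncate the second walk after two entries, which is exactly the bug the comment in blk_next_sg() warns about.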
@@ -521,7 +551,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	int nsegs = 0;
 
 	for_each_bio(bio)
-		bio_for_each_segment(bvec, bio, iter)
+		bio_for_each_bvec(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
 					     &nsegs);
 