... and with iov_iter_get_pages_alloc() it becomes even simpler

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro 2017-09-23 16:13:10 -04:00
parent 076098e51b
commit 629e42bcc3

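The point of the switch: iov_iter_get_pages() fills a caller-supplied array, which forced bio_map_user_iov() to kcalloc() a worst-case pages[] up front and track its position with cur_page. iov_iter_get_pages_alloc() allocates the array itself on each call and returns the number of bytes it pinned, so every loop iteration gets a fresh array, drops the leftover page references with put_page() and frees the array with kvfree() (the array may be kmalloc'd or vmalloc'd). Below is a minimal sketch of that calling convention, not taken from this patch; the function name pin_and_process() and the "consume the page" step are illustrative only.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Illustrative consumer of iov_iter_get_pages_alloc(); not from this patch. */
static int pin_and_process(struct iov_iter *iter)
{
        struct iov_iter i = *iter;      /* work on a copy, as the patch does */

        while (iov_iter_count(&i)) {
                struct page **pages;    /* allocated by the call below */
                size_t offs;
                ssize_t bytes;
                int npages, j;

                /* pins user pages and allocates the page array for us */
                bytes = iov_iter_get_pages_alloc(&i, &pages, LONG_MAX, &offs);
                if (bytes <= 0)
                        return bytes ? bytes : -EFAULT;

                npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

                for (j = 0; j < npages; j++) {
                        /* ... consume pages[j] here ... */
                        put_page(pages[j]);     /* drop the pinned reference */
                }

                kvfree(pages);                  /* kmalloc'd or vmalloc'd */
                iov_iter_advance(&i, bytes);    /* this variant does not advance */
        }
        return 0;
}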

@@ -1325,9 +1325,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 {
         int j;
         int nr_pages = 0;
-        struct page **pages;
         struct bio *bio;
-        int cur_page = 0;
         int ret;
         struct iov_iter i;
         struct iovec iov;
@@ -1360,19 +1358,14 @@ struct bio *bio_map_user_iov(struct request_queue *q,
         if (!bio)
                 return ERR_PTR(-ENOMEM);
 
-        ret = -ENOMEM;
-        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
-        if (!pages)
-                goto out;
-
         i = *iter;
         while (iov_iter_count(&i)) {
+                struct page **pages;
                 ssize_t bytes;
                 size_t offs, added = 0;
                 int npages;
 
-                bytes = iov_iter_get_pages(&i, pages + cur_page, LONG_MAX,
-                                           nr_pages - cur_page, &offs);
+                bytes = iov_iter_get_pages_alloc(&i, &pages, LONG_MAX, &offs);
                 if (unlikely(bytes <= 0)) {
                         ret = bytes ? bytes : -EFAULT;
                         goto out_unmap;
@@ -1380,7 +1373,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 
                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
 
-                for (j = cur_page; j < cur_page + npages; j++) {
+                for (j = 0; j < npages; j++) {
                         unsigned int n = PAGE_SIZE - offs;
                         unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
@@ -1409,13 +1402,11 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                 /*
                  * release the pages we didn't map into the bio, if any
                  */
-                while (j < cur_page + npages)
+                while (j < npages)
                         put_page(pages[j++]);
-                cur_page = j;
+                kvfree(pages);
         }
 
-        kfree(pages);
-
         bio_set_flag(bio, BIO_USER_MAPPED);
 
         /*
@@ -1431,8 +1422,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
         bio_for_each_segment_all(bvec, bio, j) {
                 put_page(bvec->bv_page);
         }
- out:
-        kfree(pages);
         bio_put(bio);
         return ERR_PTR(ret);
 }