ceph: fix memory leak in ceph_direct_read_write()

The bvecs array, which is allocated in iter_get_bvecs_alloc(), is
leaked and its pages remain pinned if ceph_alloc_sparse_ext_map()
fails.

There is no need to delay the allocation of the sparse_ext map until
after the bvecs array is set up, so fix this by moving the sparse_ext
allocation a bit earlier.  Also, make a similar adjustment in
__ceph_sync_read() for consistency (a leak of the same kind there has
been addressed differently).
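
The underlying rule is to order the steps so that the allocation which
can fail happens before acquiring the resource that the error path does
not know how to release.  Below is a minimal userspace sketch of that
ordering; the names alloc_sparse_ext_map() and submit_request() and
their bodies are illustrative stand-ins, not the kernel code:

  #include <stdlib.h>

  /* Stand-in for the fallible sparse_ext allocation. */
  static int alloc_sparse_ext_map(void **map, size_t cnt)
  {
          *map = calloc(cnt, sizeof(long));
          return *map ? 0 : -1;   /* this is the step that may fail */
  }

  static int submit_request(size_t size)
  {
          void *map;
          char *bvecs;

          /* Fixed ordering: do the fallible allocation first, so a
           * failure here leaves nothing pinned and nothing to unwind. */
          if (alloc_sparse_ext_map(&map, 16))
                  return -1;      /* error path has nothing extra to free */

          bvecs = malloc(size);   /* stands in for iter_get_bvecs_alloc() */
          if (!bvecs) {
                  free(map);
                  return -1;
          }

          /* ... issue the I/O ... */
          free(bvecs);
          free(map);
          return 0;
  }

  int main(void)
  {
          return submit_request(4096) ? 1 : 0;
  }

With the old ordering, the error branch after the sparse_ext allocation
would also have had to free bvecs and unpin the pages, which is exactly
the cleanup that was missing.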

Cc: stable@vger.kernel.org
Fixes: 03bc06c7b0 ("ceph: add new mount option to enable sparse reads")
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Alex Markuze <amarkuze@redhat.com>

--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 		len = read_off + read_len - off;
 		more = len < iov_iter_count(to);
 
+		op = &req->r_ops[0];
+		if (sparse) {
+			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+			if (ret) {
+				ceph_osdc_put_request(req);
+				break;
+			}
+		}
+
 		num_pages = calc_pages_for(read_off, read_len);
 		page_off = offset_in_page(off);
 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
@@ -1129,16 +1139,6 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 						 offset_in_page(read_off),
 						 false, true);
 
-		op = &req->r_ops[0];
-		if (sparse) {
-			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-			if (ret) {
-				ceph_osdc_put_request(req);
-				break;
-			}
-		}
-
 		ceph_osdc_start_request(osdc, req);
 		ret = ceph_osdc_wait_request(osdc, req);
@@ -1551,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			break;
 		}
 
+		op = &req->r_ops[0];
+		if (sparse) {
+			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+			if (ret) {
+				ceph_osdc_put_request(req);
+				break;
+			}
+		}
+
 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 		if (len < 0) {
 			ceph_osdc_put_request(req);
@@ -1560,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		if (len != size)
 			osd_req_op_extent_update(req, 0, len);
 
+		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
 		/*
 		 * To simplify error handling, allow AIO when IO within i_size
 		 * or IO can be satisfied by single OSD request.
@@ -1591,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			req->r_mtime = mtime;
 		}
 
-		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
-
-		op = &req->r_ops[0];
-		if (sparse) {
-			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-			if (ret) {
-				ceph_osdc_put_request(req);
-				break;
-			}
-		}
-
 		if (aio_req) {
 			aio_req->total_len += len;
 			aio_req->num_reqs++;