Merge tag 'ceph-for-6.13-rc4' of https://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "A handful of important CephFS fixes from Max, Alex and myself: memory
  corruption due to a buffer overrun, potential infinite loop and
  several memory leaks on the error paths. All but one marked for
  stable"

* tag 'ceph-for-6.13-rc4' of https://github.com/ceph/ceph-client:
  ceph: allocate sparse_ext map only for sparse reads
  ceph: fix memory leak in ceph_direct_read_write()
  ceph: improve error handling and short/overflow-read logic in __ceph_sync_read()
  ceph: validate snapdirname option length when mounting
  ceph: give up on paths longer than PATH_MAX
  ceph: fix memory leaks in __ceph_sync_read()
commit 7684392f17
@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
         if (ceph_inode_is_shutdown(inode))
                 return -EIO;
 
-        if (!len)
+        if (!len || !i_size)
                 return 0;
         /*
          * flush any page cache pages in this range. this
@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                 int num_pages;
                 size_t page_off;
                 bool more;
-                int idx;
+                int idx = 0;
                 size_t left;
                 struct ceph_osd_req_op *op;
                 u64 read_off = off;
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                 len = read_off + read_len - off;
                 more = len < iov_iter_count(to);
 
+                op = &req->r_ops[0];
+                if (sparse) {
+                        extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+                        ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+                        if (ret) {
+                                ceph_osdc_put_request(req);
+                                break;
+                        }
+                }
+
                 num_pages = calc_pages_for(read_off, read_len);
                 page_off = offset_in_page(off);
                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
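The hunk above moves the sparse-read extent map allocation to just after the request is built and guards it with "if (sparse)", so a failed allocation only has to drop the request before any data pages are attached. A minimal userspace sketch of that ordering, where struct request, alloc_sparse_map() and put_request() are made-up stand-ins for the Ceph OSD request API, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct request {
        int *sparse_map;        /* optional per-request metadata */
        size_t map_cnt;
};

static int alloc_sparse_map(struct request *req, size_t cnt)
{
        req->sparse_map = calloc(cnt, sizeof(*req->sparse_map));
        if (!req->sparse_map)
                return -1;
        req->map_cnt = cnt;
        return 0;
}

static void put_request(struct request *req)
{
        free(req->sparse_map);  /* dropping the request frees its metadata */
        free(req);
}

int main(void)
{
        int sparse = 1;
        struct request *req = calloc(1, sizeof(*req));

        if (!req)
                return 1;

        /* metadata first: on failure, put_request() is the only cleanup needed */
        if (sparse && alloc_sparse_map(req, 16) != 0) {
                put_request(req);
                return 1;
        }

        /* ...allocate data pages and submit the request here... */
        puts("request ready");
        put_request(req);
        return 0;
}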
@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 
                 osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
                                                  offset_in_page(read_off),
-                                                 false, false);
-
-                op = &req->r_ops[0];
-                if (sparse) {
-                        extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
-                        ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-                        if (ret) {
-                                ceph_osdc_put_request(req);
-                                break;
-                        }
-                }
+                                                 false, true);
 
                 ceph_osdc_start_request(osdc, req);
                 ret = ceph_osdc_wait_request(osdc, req);
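Here the final osd_req_op_extent_osd_data_pages() argument flips from false to true, handing ownership of the page vector to the request so that dropping the request also frees the pages on every exit path. A small standalone sketch of that ownership transfer, using hypothetical attach_pages() and put_request() helpers rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct request {
        void *pages;
        int own_pages;
};

static void attach_pages(struct request *req, void *pages, int own)
{
        req->pages = pages;
        req->own_pages = own;
}

static void put_request(struct request *req)
{
        if (req->own_pages)
                free(req->pages);       /* the request owns the data buffer */
        free(req);
}

int main(void)
{
        struct request *req = calloc(1, sizeof(*req));
        void *pages = malloc(4096);

        if (!req || !pages) {
                free(pages);
                free(req);
                return 1;
        }

        attach_pages(req, pages, 1);    /* own_pages = true */
        puts("submitting request");
        put_request(req);               /* also frees the pages */
        return 0;
}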
@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                 else if (ret == -ENOENT)
                         ret = 0;
 
-                if (ret > 0 && IS_ENCRYPTED(inode)) {
+                if (ret < 0) {
+                        ceph_osdc_put_request(req);
+                        if (ret == -EBLOCKLISTED)
+                                fsc->blocklisted = true;
+                        break;
+                }
+
+                if (IS_ENCRYPTED(inode)) {
                         int fret;
 
                         fret = ceph_fscrypt_decrypt_extents(inode, pages,
@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                         ret = min_t(ssize_t, fret, len);
                 }
 
-                ceph_osdc_put_request(req);
-
                 /* Short read but not EOF? Zero out the remainder. */
-                if (ret >= 0 && ret < len && (off + ret < i_size)) {
+                if (ret < len && (off + ret < i_size)) {
                         int zlen = min(len - ret, i_size - off - ret);
                         int zoff = page_off + ret;
 
@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                         ret += zlen;
                 }
 
-                idx = 0;
-                if (ret <= 0)
-                        left = 0;
-                else if (off + ret > i_size)
-                        left = i_size - off;
+                if (off + ret > i_size)
+                        left = (i_size > off) ? i_size - off : 0;
                 else
                         left = ret;
+
                 while (left > 0) {
                         size_t plen, copied;
 
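The rewritten "left" computation clamps to zero when the current offset is already at or beyond i_size; with unsigned arithmetic, i_size - off would otherwise wrap to a huge value and the copy loop would run past the valid data. A standalone sketch of the same clamp, with made-up offsets and sizes:

#include <stdint.h>
#include <stdio.h>

/* clamp the number of bytes to copy to what lies below i_size */
static uint64_t bytes_to_copy(uint64_t off, uint64_t ret, uint64_t i_size)
{
        uint64_t left;

        if (off + ret > i_size)
                left = (i_size > off) ? i_size - off : 0;
        else
                left = ret;
        return left;
}

int main(void)
{
        /* read landed entirely beyond EOF: nothing to copy */
        printf("%llu\n", (unsigned long long)bytes_to_copy(8192, 4096, 4096));
        /* read straddles EOF: copy only up to i_size */
        printf("%llu\n", (unsigned long long)bytes_to_copy(2048, 4096, 4096));
        /* read entirely below EOF: copy everything returned */
        printf("%llu\n", (unsigned long long)bytes_to_copy(0, 4096, 8192));
        return 0;
}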
@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                                 break;
                         }
                 }
-                ceph_release_page_vector(pages, num_pages);
 
-                if (ret < 0) {
-                        if (ret == -EBLOCKLISTED)
-                                fsc->blocklisted = true;
-                        break;
-                }
+                ceph_osdc_put_request(req);
 
                 if (off >= i_size || !more)
                         break;
@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                         break;
                 }
 
+                op = &req->r_ops[0];
+                if (!write && sparse) {
+                        extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+                        ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+                        if (ret) {
+                                ceph_osdc_put_request(req);
+                                break;
+                        }
+                }
+
                 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                 if (len < 0) {
                         ceph_osdc_put_request(req);
@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                 if (len != size)
                         osd_req_op_extent_update(req, 0, len);
 
+                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
                 /*
                  * To simplify error handling, allow AIO when IO within i_size
                  * or IO can be satisfied by single OSD request.
@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                         req->r_mtime = mtime;
                 }
 
-                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
-                op = &req->r_ops[0];
-                if (sparse) {
-                        extent_cnt = __ceph_sparse_read_ext_count(inode, size);
-                        ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-                        if (ret) {
-                                ceph_osdc_put_request(req);
-                                break;
-                        }
-                }
-
                 if (aio_req) {
                         aio_req->total_len += len;
                         aio_req->num_reqs++;
@@ -2800,12 +2800,11 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
 
         if (pos < 0) {
                 /*
-                 * A rename didn't occur, but somehow we didn't end up where
-                 * we thought we would. Throw a warning and try again.
+                 * The path is longer than PATH_MAX and this function
+                 * cannot ever succeed. Creating paths that long is
+                 * possible with Ceph, but Linux cannot use them.
                  */
-                pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
-                               pos);
-                goto retry;
+                return ERR_PTR(-ENAMETOOLONG);
         }
 
         *pbase = base;
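Instead of warning and retrying forever, the path builder now reports -ENAMETOOLONG once pos goes negative, i.e. once the components can no longer fit in the buffer. A hypothetical userspace sketch of the same right-to-left construction and give-up condition; build_path() below is illustrative, not the kernel function:

#include <errno.h>
#include <limits.h>     /* PATH_MAX */
#include <stdio.h>
#include <string.h>

/* build "/comp0/comp1/..." right-to-left into a PATH_MAX buffer */
static int build_path(char *buf, const char *comps[], int ncomps)
{
        int pos = PATH_MAX - 1;

        buf[pos] = '\0';
        for (int i = ncomps - 1; i >= 0; i--) {
                int len = (int)strlen(comps[i]);

                pos -= len + 1;                 /* room for "/component" */
                if (pos < 0)
                        return -ENAMETOOLONG;   /* it can never fit: give up */
                buf[pos] = '/';
                memcpy(buf + pos + 1, comps[i], len);
        }
        memmove(buf, buf + pos, PATH_MAX - pos);
        return 0;
}

int main(void)
{
        char buf[PATH_MAX];
        const char *comps[] = { "dir", "subdir", "file" };

        if (build_path(buf, comps, 3) == 0)
                printf("%s\n", buf);
        return 0;
}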
@@ -431,6 +431,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 
         switch (token) {
         case Opt_snapdirname:
+                if (strlen(param->string) > NAME_MAX)
+                        return invalfc(fc, "snapdirname too long");
                 kfree(fsopt->snapdir_name);
                 fsopt->snapdir_name = param->string;
                 param->string = NULL;
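The new check rejects a snapdirname= mount option longer than NAME_MAX before it is stored, since the value is later used as a single directory-entry name. A minimal sketch of that validation pattern; set_snapdir_name() is a made-up helper, and the ".snap" default is only assumed for illustration:

#include <errno.h>
#include <limits.h>     /* NAME_MAX */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* validate and store a user-supplied snapdir name */
static int set_snapdir_name(char **slot, const char *value)
{
        if (strlen(value) > NAME_MAX)
                return -ENAMETOOLONG;   /* reject before taking ownership */
        free(*slot);
        *slot = strdup(value);
        return *slot ? 0 : -ENOMEM;
}

int main(void)
{
        char *snapdir_name = strdup(".snap");   /* assumed default */

        if (set_snapdir_name(&snapdir_name, ".mysnaps") == 0)
                printf("snapdir: %s\n", snapdir_name);
        free(snapdir_name);
        return 0;
}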
@@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request);
 
 int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
 {
+        WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ);
+
         op->extent.sparse_ext_cnt = cnt;
         op->extent.sparse_ext = kmalloc_array(cnt,
                                               sizeof(*op->extent.sparse_ext),
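__ceph_alloc_sparse_ext_map() sizes the extent array with kmalloc_array(), which fails instead of silently overflowing when the count times the element size would wrap. A userspace sketch of the same overflow-checked allocation; struct extent and alloc_extent_map() are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct extent {
        uint64_t off;
        uint64_t len;
};

/* refuse a count that would overflow cnt * sizeof(struct extent) */
static struct extent *alloc_extent_map(size_t cnt)
{
        if (cnt > SIZE_MAX / sizeof(struct extent))
                return NULL;
        return calloc(cnt, sizeof(struct extent));
}

int main(void)
{
        struct extent *map = alloc_extent_map(16);

        if (!map)
                return 1;
        printf("allocated %zu bytes\n", 16 * sizeof(struct extent));
        free(map);
        return 0;
}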