mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-03 19:55:31 +00:00

Changes since last update:

Merge tag 'erofs-for-6.8-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "In this cycle, we'd like to enable basic sub-page compressed data
  support for the Android ecosystem (for vendors to try out 16k page
  size with 4k-block images in their compatibility mode) as well as
  container images (so that 4k-block images can be parsed on arm64
  cloud servers using 64k page size).

  In addition, there are several bugfixes and cleanups as usual. All
  commits have been in -next for a while and no potential merge
  conflict is observed.

  Summary:

   - Add basic sub-page compressed data support

   - Fix a memory leak on MicroLZMA and DEFLATE compression

   - Fix a rare LZ4 inplace decompression issue on recent x86 CPUs

   - Fix a KASAN issue reported by syzbot around crafted images

   - Some cleanups"

* tag 'erofs-for-6.8-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: make erofs_{err,info}() support NULL sb parameter
  erofs: avoid debugging output for (de)compressed data
  erofs: allow partially filled compressed bvecs
  erofs: enable sub-page compressed block support
  erofs: refine z_erofs_transform_plain() for sub-page block support
  erofs: fix ztailpacking for subpage compressed blocks
  erofs: fix up compacted indexes for block size < 4096
  erofs: record `pclustersize` in bytes instead of pages
  erofs: support I/O submission for sub-page compressed blocks
  erofs: fix lz4 inplace decompression
  erofs: fix memory leak on short-lived bounced pages
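As a rough illustration of the sub-page case this series enables (a hypothetical standalone sketch, not kernel code): with 4KiB filesystem blocks on a 64KiB-page kernel, one page spans 16 blocks, so compressed extents no longer start and end on page boundaries and sizes must be tracked in bytes:

	#include <stdio.h>

	#define PAGE_SIZE	65536UL		/* e.g. arm64 with 64KiB pages */
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long blocksize = 4096;			/* 4KiB-block image */
		unsigned long pclustersize = 3 * blocksize;	/* 12KiB pcluster */

		/* pages needed to hold the compressed data, cf. z_erofs_pclusterpages() */
		printf("%lu bytes -> %lu page(s)\n",
		       pclustersize, PAGE_ALIGN(pclustersize) / PAGE_SIZE);
		return 0;
	}

Here a 12KiB pcluster occupies a single 64KiB page and sub-page offsets are kept in bytes; under a 4KiB-page kernel the same pcluster would span three pages.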
This commit is contained in commit 0507d2526f.
fs/erofs/decompressor.c
@@ -121,11 +121,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 }
 
 static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
-		void *inpage, unsigned int *inputmargin, int *maptype,
-		bool may_inplace)
+		void *inpage, void *out, unsigned int *inputmargin,
+		int *maptype, bool may_inplace)
 {
 	struct z_erofs_decompress_req *rq = ctx->rq;
-	unsigned int omargin, total, i, j;
+	unsigned int omargin, total, i;
 	struct page **in;
 	void *src, *tmp;
 
@@ -135,12 +135,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
 			goto docopy;
 
-		for (i = 0; i < ctx->inpages; ++i) {
-			DBG_BUGON(rq->in[i] == NULL);
-			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
-				if (rq->out[j] == rq->in[i])
-					goto docopy;
-		}
+		for (i = 0; i < ctx->inpages; ++i)
+			if (rq->out[ctx->outpages - ctx->inpages + i] !=
+			    rq->in[i])
+				goto docopy;
+		kunmap_local(inpage);
+		*maptype = 3;
+		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
 	}
 
 	if (ctx->inpages <= 1) {
@@ -148,7 +149,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 		return inpage;
 	}
 	kunmap_local(inpage);
-	might_sleep();
 	src = erofs_vm_map_ram(rq->in, ctx->inpages);
 	if (!src)
 		return ERR_PTR(-ENOMEM);
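For context on the in-place check in the hunk above, LZ4 can decompress on top of its own input only if the compressed data sits far enough past the decompressed end. erofs defines this margin near the top of decompressor.c following the upstream LZ4 recommendation; the exact formula below is reproduced from memory, so treat it as illustrative:

	#include <stdio.h>

	/* worst-case safety margin so LZ4 output can overwrite its own input */
	#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)

	int main(void)
	{
		unsigned int inputsize = 16384;	/* compressed bytes */

		/* in-place decompression is only safe beyond this distance */
		printf("margin for %u bytes: %u\n", inputsize,
		       LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize));
		return 0;
	}

The new code additionally requires the trailing input pages to alias the trailing output pages exactly (maptype 3); otherwise the data is copied out first via the docopy path.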
@@ -204,12 +204,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
 }
 
 static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
-				      u8 *out)
+				      u8 *dst)
 {
 	struct z_erofs_decompress_req *rq = ctx->rq;
 	bool support_0padding = false, may_inplace = false;
 	unsigned int inputmargin;
-	u8 *headpage, *src;
+	u8 *out, *headpage, *src;
 	int ret, maptype;
 
 	DBG_BUGON(*rq->in == NULL);
@@ -230,11 +230,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	}
 
 	inputmargin = rq->pageofs_in;
-	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
+	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
 					 &maptype, may_inplace);
 	if (IS_ERR(src))
 		return PTR_ERR(src);
 
+	out = dst + rq->pageofs_out;
 	/* legacy format could compress extra data in a pcluster. */
 	if (rq->partial_decoding || !support_0padding)
 		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
@@ -246,15 +247,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	if (ret != rq->outputsize) {
 		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
 			  ret, rq->inputsize, inputmargin, rq->outputsize);
-
-		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
-			       16, 1, src + inputmargin, rq->inputsize, true);
-		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
-			       16, 1, out, rq->outputsize, true);
-
 		if (ret >= 0)
 			memset(out + ret, 0, rq->outputsize - ret);
-		ret = -EIO;
+		ret = -EFSCORRUPTED;
 	} else {
 		ret = 0;
 	}
@@ -265,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 		vm_unmap_ram(src, ctx->inpages);
 	} else if (maptype == 2) {
 		erofs_put_pcpubuf(src);
-	} else {
+	} else if (maptype != 3) {
 		DBG_BUGON(1);
 		return -EFAULT;
 	}
@@ -308,7 +303,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 	}
 
 dstmap_out:
-	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
+	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
 	if (!dst_maptype)
 		kunmap_local(dst);
 	else if (dst_maptype == 2)
|
||||
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
|
||||
struct page **pagepool)
|
||||
{
|
||||
const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
|
||||
const unsigned int outpages =
|
||||
const unsigned int nrpages_in =
|
||||
PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
|
||||
const unsigned int nrpages_out =
|
||||
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
|
||||
const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
|
||||
PAGE_SIZE - rq->pageofs_out);
|
||||
const unsigned int lefthalf = rq->outputsize - righthalf;
|
||||
const unsigned int interlaced_offset =
|
||||
rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
|
||||
u8 *src;
|
||||
const unsigned int bs = rq->sb->s_blocksize;
|
||||
unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
|
||||
u8 *kin;
|
||||
|
||||
if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
|
||||
DBG_BUGON(1);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (rq->out[0] == *rq->in) {
|
||||
DBG_BUGON(rq->pageofs_out);
|
||||
return 0;
|
||||
}
|
||||
|
||||
src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
|
||||
if (rq->out[0])
|
||||
memcpy_to_page(rq->out[0], rq->pageofs_out,
|
||||
src + interlaced_offset, righthalf);
|
||||
|
||||
if (outpages > inpages) {
|
||||
DBG_BUGON(!rq->out[outpages - 1]);
|
||||
if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
|
||||
memcpy_to_page(rq->out[outpages - 1], 0, src +
|
||||
(interlaced_offset ? 0 : righthalf),
|
||||
lefthalf);
|
||||
} else if (!interlaced_offset) {
|
||||
memmove(src, src + righthalf, lefthalf);
|
||||
flush_dcache_page(rq->in[inpages - 1]);
|
||||
DBG_BUGON(rq->outputsize > rq->inputsize);
|
||||
if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
|
||||
cur = bs - (rq->pageofs_out & (bs - 1));
|
||||
pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
|
||||
cur = min(cur, rq->outputsize);
|
||||
if (cur && rq->out[0]) {
|
||||
kin = kmap_local_page(rq->in[nrpages_in - 1]);
|
||||
if (rq->out[0] == rq->in[nrpages_in - 1]) {
|
||||
memmove(kin + rq->pageofs_out, kin + pi, cur);
|
||||
flush_dcache_page(rq->out[0]);
|
||||
} else {
|
||||
memcpy_to_page(rq->out[0], rq->pageofs_out,
|
||||
kin + pi, cur);
|
||||
}
|
||||
kunmap_local(kin);
|
||||
}
|
||||
rq->outputsize -= cur;
|
||||
}
|
||||
kunmap_local(src);
|
||||
|
||||
for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
|
||||
insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
|
||||
rq->outputsize -= insz;
|
||||
if (!rq->in[ni])
|
||||
continue;
|
||||
kin = kmap_local_page(rq->in[ni]);
|
||||
pi = 0;
|
||||
do {
|
||||
no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
|
||||
po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
|
||||
DBG_BUGON(no >= nrpages_out);
|
||||
cnt = min(insz - pi, PAGE_SIZE - po);
|
||||
if (rq->out[no] == rq->in[ni]) {
|
||||
memmove(kin + po,
|
||||
kin + rq->pageofs_in + pi, cnt);
|
||||
flush_dcache_page(rq->out[no]);
|
||||
} else if (rq->out[no]) {
|
||||
memcpy_to_page(rq->out[no], po,
|
||||
kin + rq->pageofs_in + pi, cnt);
|
||||
}
|
||||
pi += cnt;
|
||||
} while (pi < insz);
|
||||
kunmap_local(kin);
|
||||
}
|
||||
DBG_BUGON(ni > nrpages_in);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
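To see what the interlaced head handling in the rewritten z_erofs_transform_plain() computes, here is a standalone sketch with made-up numbers that mirrors the arithmetic rather than reusing kernel helpers:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned int bs = 4096, pageofs_out = 1024, pageofs_in = 0;
		unsigned int inputsize = 8192, outputsize = 8192;
		unsigned int cur, pi;

		/* bytes of the wrapped-around block head to move first */
		cur = bs - (pageofs_out & (bs - 1));
		/* where that head lives inside the last input page */
		pi = (pageofs_in + inputsize - cur) & ~PAGE_MASK;
		if (cur > outputsize)
			cur = outputsize;
		printf("head copy: %u bytes from in-page offset %u\n", cur, pi);
		return 0;
	}

With these numbers the last 3072 bytes of input page 1 (in-page offset 1024) are moved to offset 1024 of output page 0 first; the remaining output is then walked page by page by the main loop, using memmove() whenever an input page doubles as an output page.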
fs/erofs/decompressor_deflate.c
@@ -70,7 +70,7 @@ int __init z_erofs_deflate_init(void)
 	return 0;
 
 out_failed:
-	pr_err("failed to allocate zlib workspace\n");
+	erofs_err(NULL, "failed to allocate zlib workspace");
 	z_erofs_deflate_exit();
 	return -ENOMEM;
 }
fs/erofs/inode.c
@@ -259,8 +259,10 @@ static int erofs_fill_inode(struct inode *inode)
 
 	if (erofs_inode_is_data_compressed(vi->datalayout)) {
 #ifdef CONFIG_EROFS_FS_ZIP
-		if (!erofs_is_fscache_mode(inode->i_sb) &&
-		    inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
+		if (!erofs_is_fscache_mode(inode->i_sb)) {
+			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
+					erofs_info, inode->i_sb,
+					"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
 			inode->i_mapping->a_ops = &z_erofs_aops;
 			err = 0;
 			goto out_unlock;
fs/erofs/super.c
@@ -27,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
+	if (sb)
+		pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
+	else
+		pr_err("%s: %pV", func, &vaf);
 	va_end(args);
 }
 
@@ -41,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	pr_info("(device %s): %pV", sb->s_id, &vaf);
+	if (sb)
+		pr_info("(device %s): %pV", sb->s_id, &vaf);
+	else
+		pr_info("%pV", &vaf);
 	va_end(args);
 }
 
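The point of the two hunks above is that callers without a superblock in scope (such as z_erofs_deflate_init() earlier) can now pass NULL. A minimal userspace mock of the calling convention, assuming the usual wrapper macro in fs/erofs/internal.h that supplies __func__ (reproduced from memory, so treat the exact macro shape as an assumption):

	#include <stdio.h>
	#include <stdarg.h>

	/* mock of _erofs_err(): a NULL sb drops the "(device ...)" prefix */
	static void _erofs_err(const char *sb_id, const char *func,
			       const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		if (sb_id)
			printf("erofs (device %s): %s: ", sb_id, func);
		else
			printf("erofs: %s: ", func);
		vprintf(fmt, args);
		va_end(args);
	}

	#define erofs_err(sb, fmt, ...) \
		_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)

	int main(void)
	{
		erofs_err("sda1", "demo with a device");
		erofs_err(NULL, "failed to allocate zlib workspace");
		return 0;
	}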
fs/erofs/zdata.c (267 changed lines)
@@ -56,6 +56,9 @@ struct z_erofs_pcluster {
 	/* L: total number of bvecs */
 	unsigned int vcnt;
 
+	/* I: pcluster size (compressed size) in bytes */
+	unsigned int pclustersize;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;
 
@@ -70,14 +73,6 @@ struct z_erofs_pcluster {
 		struct rcu_head rcu;
 	};
 
-	union {
-		/* I: physical cluster size in pages */
-		unsigned short pclusterpages;
-
-		/* I: tailpacking inline compressed size */
-		unsigned short tailpacking_size;
-	};
-
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
 
@@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 {
-	if (z_erofs_is_inline_pcluster(pcl))
-		return 1;
-	return pcl->pclusterpages;
+	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
 /*
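A quick worked example of the new byte-based accounting (hypothetical numbers): a 4KiB-block image may carry a 12KiB pcluster, so pclustersize = 12288 and z_erofs_pclusterpages() yields PAGE_ALIGN(12288) >> PAGE_SHIFT = 3 pages on a 4KiB-page kernel, but 1 page on a 64KiB-page kernel. A ztailpacking pcluster smaller than one page likewise rounds up to a single page, which is what the removed `return 1` special case used to hard-code.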
@@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void)
 	return 0;
 }
 
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 {
-	int i;
+	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
 
-	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
-		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
 		struct z_erofs_pcluster *pcl;
 
 		if (nrpages > pcs->maxpages)
@@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
 		if (!pcl)
 			return ERR_PTR(-ENOMEM);
-		pcl->pclusterpages = nrpages;
+		pcl->pclustersize = size;
 		return pcl;
 	}
 	return ERR_PTR(-EINVAL);
@@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
@@ -569,13 +563,14 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 	unsigned int i;
 
+	if (i_blocksize(fe->inode) != PAGE_SIZE)
+		return;
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
-	for (i = 0; i < pcl->pclusterpages; ++i) {
-		struct page *page;
+	for (i = 0; i < pclusterpages; ++i) {
+		struct page *page, *newpage;
 		void *t;	/* mark pages just found for debugging */
-		struct page *newpage = NULL;
 
 		/* the compressed page was loaded before */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
@@ -585,6 +580,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 		if (page) {
 			t = (void *)((unsigned long)page | 1);
+			newpage = NULL;
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
@@ -592,9 +588,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 			continue;
 
 		/*
-		 * try to use cached I/O if page allocation
-		 * succeeds or fallback to in-place I/O instead
-		 * to avoid any direct reclaim.
+		 * Try cached I/O if allocation succeeds or fallback to
+		 * in-place I/O instead to avoid any direct reclaim.
 		 */
 		newpage = erofs_allocpage(&fe->pagepool, gfp);
 		if (!newpage)
@@ -626,6 +621,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
@@ -633,7 +629,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	 * refcount of workgroup is now freezed as 0,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page = pcl->compressed_bvecs[i].page;
 
 		if (!page)
@@ -657,6 +653,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool ret;
 	int i;
 
@@ -669,7 +666,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		goto out;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 			ret = true;
@@ -778,20 +775,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
+	struct super_block *sb = fe->inode->i_sb;
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
 	struct erofs_workgroup *grp;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
-	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
 
 	/* no available pcluster, let's allocate one */
-	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
-				     map->m_plen >> PAGE_SHIFT);
+	pcl = z_erofs_alloc_pcluster(map->m_plen);
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
@@ -815,10 +812,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
-		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
-		pcl->tailpacking_size = map->m_plen;
 	} else {
-		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+		pcl->obj.index = erofs_blknr(sb, map->m_pa);
 
 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
 		if (IS_ERR(grp)) {
@@ -893,6 +888,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 		}
 		get_page(map->buf.page);
 		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+		fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 	}
 	/* file-backed inplace I/O pages are traversed in reverse order */
@@ -973,12 +969,12 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	struct inode *const inode = fe->inode;
 	struct erofs_map_blocks *const map = &fe->map;
 	const loff_t offset = page_offset(page);
+	const unsigned int bs = i_blocksize(inode);
 	bool tight = true, exclusive;
 	unsigned int cur, end, len, split;
 	int err = 0;
 
 	z_erofs_onlinepage_init(page);
 
 	split = 0;
 	end = PAGE_SIZE;
 repeat:
@@ -1027,7 +1023,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	 * for inplace I/O or bvpage (should be processed in a strict order.)
 	 */
 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
-	exclusive = (!cur && ((split <= 1) || tight));
+	exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE)));
 	if (cur)
 		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
 
@@ -1206,34 +1202,27 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
 		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
 		struct page *page = bvec->page;
 
-		/* compressed pages ought to be present before decompressing */
+		/* compressed data ought to be valid before decompressing */
 		if (!page) {
 			DBG_BUGON(1);
 			err = -EIO;
 			continue;
 		}
 		be->compressed_pages[i] = page;
 
-		if (z_erofs_is_inline_pcluster(pcl)) {
+		if (z_erofs_is_inline_pcluster(pcl) ||
+		    erofs_page_is_managed(EROFS_SB(be->sb), page)) {
 			if (!PageUptodate(page))
 				err = -EIO;
 			continue;
 		}
 
 		DBG_BUGON(z_erofs_page_is_invalidated(page));
-		if (!z_erofs_is_shortlived_page(page)) {
-			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
-				if (!PageUptodate(page))
-					err = -EIO;
-				continue;
-			}
-			z_erofs_do_decompressed_bvec(be, bvec);
-			*overlapped = true;
-		}
+		if (z_erofs_is_shortlived_page(page))
+			continue;
+		z_erofs_do_decompressed_bvec(be, bvec);
+		*overlapped = true;
 	}
 
-	if (err)
-		return err;
-	return 0;
+	return err;
 }
@@ -1242,10 +1231,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
 	struct z_erofs_pcluster *pcl = be->pcl;
-	const struct z_erofs_decompressor *decompressor =
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+	const struct z_erofs_decompressor *decomp =
 		&erofs_decompressors[pcl->algorithmformat];
-	unsigned int i, inputsize;
-	int err2;
+	int i, err2;
 	struct page *page;
 	bool overlapped;
 
@@ -1279,21 +1267,14 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
 	if (err2)
 		err = err2;
-	if (err)
-		goto out;
-
-	if (z_erofs_is_inline_pcluster(pcl))
-		inputsize = pcl->tailpacking_size;
-	else
-		inputsize = pclusterpages * PAGE_SIZE;
-
-	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
+	if (!err)
+		err = decomp->decompress(&(struct z_erofs_decompress_req) {
 		.sb = be->sb,
 		.in = be->compressed_pages,
 		.out = be->decompressed_pages,
 		.pageofs_in = pcl->pageofs_in,
 		.pageofs_out = pcl->pageofs_out,
-		.inputsize = inputsize,
+		.inputsize = pcl->pclustersize,
 		.outputsize = pcl->length,
 		.alg = pcl->algorithmformat,
 		.inplace_io = overlapped,
@@ -1301,7 +1282,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 		.fillgaps = pcl->multibases,
 	}, be->pagepool);
 
-out:
 	/* must handle all compressed pages before actual file pages */
 	if (z_erofs_is_inline_pcluster(pcl)) {
 		page = pcl->compressed_bvecs[0].page;
@@ -1309,12 +1289,11 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 		put_page(page);
 	} else {
 		for (i = 0; i < pclusterpages; ++i) {
-			page = pcl->compressed_bvecs[i].page;
-
-			if (erofs_page_is_managed(sbi, page))
+			/* consider shortlived pages added when decompressing */
+			page = be->compressed_pages[i];
+			if (!page || erofs_page_is_managed(sbi, page))
 				continue;
-
 			/* recycle all individual short-lived pages */
 			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 		}
@@ -1436,86 +1415,85 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 	z_erofs_decompressqueue_work(&io->u.work);
 }
 
-static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
-					       unsigned int nr,
-					       struct page **pagepool,
-					       struct address_space *mc)
+static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
+				 struct z_erofs_decompress_frontend *f,
+				 struct z_erofs_pcluster *pcl,
+				 unsigned int nr,
+				 struct address_space *mc)
 {
-	const pgoff_t index = pcl->obj.index;
 	gfp_t gfp = mapping_gfp_mask(mc);
 	bool tocache = false;
-
+	struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr;
 	struct address_space *mapping;
-	struct page *oldpage, *page;
-	int justfound;
+	struct page *page, *oldpage;
+	int justfound, bs = i_blocksize(f->inode);
 
+	/* Except for inplace pages, the entire page can be used for I/Os */
+	bvec->bv_offset = 0;
+	bvec->bv_len = PAGE_SIZE;
 repeat:
-	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
-	oldpage = page;
-
-	if (!page)
+	oldpage = READ_ONCE(zbv->page);
+	if (!oldpage)
 		goto out_allocpage;
 
-	justfound = (unsigned long)page & 1UL;
-	page = (struct page *)((unsigned long)page & ~1UL);
+	justfound = (unsigned long)oldpage & 1UL;
+	page = (struct page *)((unsigned long)oldpage & ~1UL);
+	bvec->bv_page = page;
 
+	DBG_BUGON(z_erofs_is_shortlived_page(page));
 	/*
-	 * preallocated cached pages, which is used to avoid direct reclaim
-	 * otherwise, it will go inplace I/O path instead.
+	 * Handle preallocated cached pages.  We tried to allocate such pages
+	 * without triggering direct reclaim.  If allocation failed, inplace
+	 * file-backed pages will be used instead.
 	 */
 	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
-		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
 		set_page_private(page, 0);
+		WRITE_ONCE(zbv->page, page);
 		tocache = true;
 		goto out_tocache;
 	}
 
 	mapping = READ_ONCE(page->mapping);
-
 	/*
-	 * file-backed online pages in plcuster are all locked steady,
-	 * therefore it is impossible for `mapping' to be NULL.
+	 * File-backed pages for inplace I/Os are all locked steady,
+	 * therefore it is impossible for `mapping` to be NULL.
 	 */
-	if (mapping && mapping != mc)
-		/* ought to be unmanaged pages */
-		goto out;
-
-	/* directly return for shortlived page as well */
-	if (z_erofs_is_shortlived_page(page))
-		goto out;
+	if (mapping && mapping != mc) {
+		if (zbv->offset < 0)
+			bvec->bv_offset = round_up(-zbv->offset, bs);
+		bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset;
+		return;
+	}
 
 	lock_page(page);
-
 	/* only true if page reclaim goes wrong, should never happen */
 	DBG_BUGON(justfound && PagePrivate(page));
 
-	/* the page is still in manage cache */
+	/* the cached page is still in managed cache */
 	if (page->mapping == mc) {
-		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
-
+		WRITE_ONCE(zbv->page, page);
+		/*
+		 * The cached page is still available but without a valid
+		 * `->private` pcluster hint.  Let's reconnect them.
+		 */
 		if (!PagePrivate(page)) {
-			/*
-			 * impossible to be !PagePrivate(page) for
-			 * the current restriction as well if
-			 * the page is already in compressed_bvecs[].
-			 */
 			DBG_BUGON(!justfound);
-
-			justfound = 0;
-			set_page_private(page, (unsigned long)pcl);
-			SetPagePrivate(page);
+			/* compressed_bvecs[] already takes a ref */
+			attach_page_private(page, pcl);
+			put_page(page);
 		}
 
-		/* no need to submit io if it is already up-to-date */
+		/* no need to submit if it is already up-to-date */
 		if (PageUptodate(page)) {
 			unlock_page(page);
-			page = NULL;
+			bvec->bv_page = NULL;
 		}
-		goto out;
+		return;
 	}
 
 	/*
-	 * the managed page has been truncated, it's unsafe to
-	 * reuse this one, let's allocate a new cache-managed page.
+	 * It has been truncated, so it's unsafe to reuse this one.  Let's
+	 * allocate a new page for compressed data.
 	 */
 	DBG_BUGON(page->mapping);
 	DBG_BUGON(!justfound);
@@ -1524,25 +1502,23 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	unlock_page(page);
 	put_page(page);
 out_allocpage:
-	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
-	if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
-			       oldpage, page)) {
-		erofs_pagepool_add(pagepool, page);
+	page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
+	if (oldpage != cmpxchg(&zbv->page, oldpage, page)) {
+		erofs_pagepool_add(&f->pagepool, page);
 		cond_resched();
 		goto repeat;
 	}
+	bvec->bv_page = page;
 out_tocache:
-	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
-		/* turn into temporary page if fails (1 ref) */
+	if (!tocache || bs != PAGE_SIZE ||
+	    add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) {
+		/* turn into a temporary shortlived page (1 ref) */
 		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
-		goto out;
+		return;
 	}
 	attach_page_private(page, pcl);
-	/* drop a refcount added by allocpage (then we have 2 refs here) */
+	/* drop a refcount added by allocpage (then 2 refs in total here) */
 	put_page(page);
-
-out:	/* the only exit (for tracing and debugging) */
-	return page;
 }
 
 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
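The interesting new case above is the inplace branch: when compressed data only partially fills a file-backed page, bv_offset/bv_len are rounded to block boundaries so that only whole, valid blocks get submitted. A hedged standalone sketch of that rounding, with invented values (`offset < 0` marks an inplace bvec whose data starts -offset bytes into the page):

	#include <stdio.h>

	#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		int bs = 4096;		/* filesystem block size */
		int offset = -5120;	/* data starts 5120 bytes into the page */
		int end = 14336;	/* data ends 14336 bytes into the page */
		int bv_offset = 0, bv_len;

		if (offset < 0)
			bv_offset = round_up(-offset, bs);
		bv_len = round_up(end, bs) - bv_offset;
		printf("submit bytes [%d, %d) of the page\n",
		       bv_offset, bv_offset + bv_len);
		return 0;
	}

With these invented numbers this submits bytes [8192, 16384) of the page; offsets and lengths stay block-aligned, so the device never sees a partial block.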
@@ -1597,7 +1573,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static void z_erofs_decompressqueue_endio(struct bio *bio)
+static void z_erofs_submissionqueue_endio(struct bio *bio)
 {
 	struct z_erofs_decompressqueue *q = bio->bi_private;
 	blk_status_t err = bio->bi_status;
@@ -1609,7 +1585,6 @@ static void z_erofs_submissionqueue_endio(struct bio *bio)
 
 		DBG_BUGON(PageUptodate(page));
 		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
 		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
 			if (!err)
 				SetPageUptodate(page);
@@ -1632,17 +1607,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
 	z_erofs_next_pcluster_t owned_head = f->owned_head;
 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
-	pgoff_t last_index;
+	erofs_off_t last_pa;
 	struct block_device *last_bdev;
 	unsigned int nr_bios = 0;
 	struct bio *bio = NULL;
 	unsigned long pflags;
 	int memstall = 0;
 
-	/*
-	 * if managed cache is enabled, bypass jobqueue is needed,
-	 * no need to read from device for all pclusters in this queue.
-	 */
+	/* No need to read from device for pclusters in the bypass queue. */
 	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
 	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
 
@@ -1655,7 +1627,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 	do {
 		struct erofs_map_dev mdev;
 		struct z_erofs_pcluster *pcl;
-		pgoff_t cur, end;
+		erofs_off_t cur, end;
+		struct bio_vec bvec;
 		unsigned int i = 0;
 		bool bypass = true;
 
@@ -1674,18 +1647,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 		};
 		(void)erofs_map_dev(sb, &mdev);
 
-		cur = erofs_blknr(sb, mdev.m_pa);
-		end = cur + pcl->pclusterpages;
-
+		cur = mdev.m_pa;
+		end = cur + pcl->pclustersize;
 		do {
-			struct page *page;
-
-			page = pickup_page_for_submission(pcl, i++,
-							  &f->pagepool, mc);
-			if (!page)
+			z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
+			if (!bvec.bv_page)
 				continue;
 
-			if (bio && (cur != last_index + 1 ||
+			if (bio && (cur != last_pa ||
 				    last_bdev != mdev.m_bdev)) {
 submit_bio_retry:
 				submit_bio(bio);
@@ -1696,7 +1665,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				bio = NULL;
 			}
 
-			if (unlikely(PageWorkingset(page)) && !memstall) {
+			if (unlikely(PageWorkingset(bvec.bv_page)) &&
+			    !memstall) {
 				psi_memstall_enter(&pflags);
 				memstall = 1;
 			}
@@ -1704,23 +1674,24 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 			if (!bio) {
 				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
 						REQ_OP_READ, GFP_NOIO);
-				bio->bi_end_io = z_erofs_decompressqueue_endio;
-
-				last_bdev = mdev.m_bdev;
-				bio->bi_iter.bi_sector = (sector_t)cur <<
-					(sb->s_blocksize_bits - 9);
+				bio->bi_end_io = z_erofs_submissionqueue_endio;
+				bio->bi_iter.bi_sector = cur >> 9;
 				bio->bi_private = q[JQ_SUBMIT];
 				if (readahead)
 					bio->bi_opf |= REQ_RAHEAD;
 				++nr_bios;
+				last_bdev = mdev.m_bdev;
 			}
 
-			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
+			if (cur + bvec.bv_len > end)
+				bvec.bv_len = end - cur;
+			if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
+					  bvec.bv_offset))
 				goto submit_bio_retry;
 
-			last_index = cur;
+			last_pa = cur + bvec.bv_len;
 			bypass = false;
-		} while (++cur < end);
+		} while ((cur += bvec.bv_len) < end);
 
 		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
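Submission now tracks physical positions in bytes rather than block indexes, which is what lets a bio carry partial pages: the start sector is simply `cur >> 9`, and contiguity is checked against `last_pa = cur + bvec.bv_len`. A tiny sketch of the equivalence with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long m_pa = 0x3000;	/* byte address: 4KiB block 3 */
		unsigned int blkbits = 12;		/* 4096-byte blocks */

		/* old scheme: block number scaled to 512-byte sectors */
		unsigned long long old_sect = (m_pa >> blkbits) << (blkbits - 9);
		/* new scheme: byte address converted to sectors directly */
		unsigned long long new_sect = m_pa >> 9;

		printf("old=%llu new=%llu\n", old_sect, new_sect);	/* both 24 */
		return 0;
	}

The two agree whenever m_pa is block-aligned, but only the byte-based form can address a compressed extent that starts mid-page on a sub-page-block image.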
fs/erofs/zmap.c
@@ -82,29 +82,26 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
 }
 
 static unsigned int decode_compactedbits(unsigned int lobits,
-					 unsigned int lomask,
 					 u8 *in, unsigned int pos, u8 *type)
 {
 	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
-	const unsigned int lo = v & lomask;
+	const unsigned int lo = v & ((1 << lobits) - 1);
 
 	*type = (v >> lobits) & 3;
 	return lo;
 }
 
-static int get_compacted_la_distance(unsigned int lclusterbits,
+static int get_compacted_la_distance(unsigned int lobits,
 				     unsigned int encodebits,
 				     unsigned int vcnt, u8 *in, int i)
 {
-	const unsigned int lomask = (1 << lclusterbits) - 1;
 	unsigned int lo, d1 = 0;
 	u8 type;
 
 	DBG_BUGON(i >= vcnt);
 
 	do {
-		lo = decode_compactedbits(lclusterbits, lomask,
-					  in, encodebits * i, &type);
+		lo = decode_compactedbits(lobits, in, encodebits * i, &type);
 
 		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 			return d1;
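decode_compactedbits() unpacks a 2-bit lcluster type stored above a `lobits`-wide low field; the change above derives the mask from lobits itself instead of taking a separate lomask, and the next hunks widen lobits so the lo field stays large enough to carry the CBLKCNT flag when lclusterbits < 12. A hedged standalone sketch of the unpacking (simplified: it reads from a plain u32 rather than an unaligned byte stream):

	#include <stdio.h>

	static unsigned int decode(unsigned int lobits, unsigned int v,
				   unsigned int pos, unsigned char *type)
	{
		unsigned int field = v >> pos;
		unsigned int lo = field & ((1U << lobits) - 1);

		*type = (field >> lobits) & 3;	/* 2-bit lcluster type */
		return lo;
	}

	int main(void)
	{
		unsigned char type;
		/* one 12+2 bit record: type 1, lo 0x555 */
		unsigned int v = (1U << 12) | 0x555;
		unsigned int lo = decode(12, v, 0, &type);

		printf("type=%u lo=%#x\n", type, lo);	/* type=1 lo=0x555 */
		return 0;
	}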
@@ -123,15 +120,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 {
 	struct erofs_inode *const vi = EROFS_I(m->inode);
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
-	const unsigned int lomask = (1 << lclusterbits) - 1;
-	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
+	unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
 	int i;
 	u8 *in, type;
 	bool big_pcluster;
 
 	if (1 << amortizedshift == 4 && lclusterbits <= 14)
 		vcnt = 2;
-	else if (1 << amortizedshift == 2 && lclusterbits == 12)
+	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
 		vcnt = 16;
 	else
 		return -EOPNOTSUPP;
@@ -140,6 +136,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
 			 (vcnt << amortizedshift);
 	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
 	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
 	eofs = erofs_blkoff(m->inode->i_sb, pos);
 	base = round_down(eofs, vcnt << amortizedshift);
@@ -147,15 +144,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 
 	i = (eofs - base) >> amortizedshift;
 
-	lo = decode_compactedbits(lclusterbits, lomask,
-				  in, encodebits * i, &type);
+	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
 	m->type = type;
 	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
 		m->clusterofs = 1 << lclusterbits;
 
 		/* figure out lookahead_distance: delta[1] if needed */
 		if (lookahead)
-			m->delta[1] = get_compacted_la_distance(lclusterbits,
+			m->delta[1] = get_compacted_la_distance(lobits,
 						encodebits, vcnt, in, i);
 		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
 			if (!big_pcluster) {
@@ -174,8 +170,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 		 * of which lo saves delta[1] rather than delta[0].
 		 * Hence, get delta[0] by the previous lcluster indirectly.
 		 */
-		lo = decode_compactedbits(lclusterbits, lomask,
-					  in, encodebits * (i - 1), &type);
+		lo = decode_compactedbits(lobits, in,
+					  encodebits * (i - 1), &type);
 		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 			lo = 0;
 		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
@@ -190,8 +186,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 			nblk = 1;
 		while (i > 0) {
 			--i;
-			lo = decode_compactedbits(lclusterbits, lomask,
-						  in, encodebits * i, &type);
+			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
 			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 				i -= lo;
 
@@ -202,8 +198,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 			nblk = 0;
 		while (i > 0) {
 			--i;
-			lo = decode_compactedbits(lclusterbits, lomask,
-						  in, encodebits * i, &type);
+			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
 			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
 				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
 					--i;