mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-01 10:42:11 +00:00
Changes since last update:

- Convert metadata APIs to byte offsets
- Avoid allocating DEFLATE streams unnecessarily
- Some erofs_show_options() cleanup

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEQ0A6bDUS9Y+83NPFUXZn5Zlu5qoFAmZQmHARHHhpYW5nQGtl
cm5lbC5vcmcACgkQUXZn5Zlu5qrGnhAAnvOifMYekIgY/W0PSGSe85XtXps5vBjo
rixZ/vNAl8NrLgzHY5lX+4dbENywEULzdxYAgF4VN9eKNGyuZ4oCBmYStoGueQ41
N1oq36O/CVJDCOLkFUwjD6GpHngjJR3xiU8DRrhKdPZJeYXVEJwZB4KOOymorkO0
Xn9SPrF/GC4YDWJL901RKT8p6gyRNWiWJ/+hwDAxfmCSuzW2uRNnBLeXNvjqj4Z3
u5WEaFSlNRlLWnZPcHy8O3t/XAPkhvTN+C5+YeaePWyHc5WYOM9mWt8VLOFQb60K
l+q/cnWXw+8NNbxnuccWVJfEb6zUJmZ5/yTm+Ndutrpk5dFSPb6DjZo5/K36dGls
r02XysW+Jl24wBIFkYRHild2WT+gSqo/zyIDsSt/DF+DhpqmnIqAASx4yJenw7ib
BNV4m4gQflLrORKpVmsKyHrm5GuHsTWsGc51iX1uqsdfDgN79mFgR1taBAZw162P
pPeWuD6XYE+eT+t5nggnXqmZ5qatEhTFkYDjUzSq4ZQfyZnRG8Tl6zbBuyVhaxsO
zH1rAmwtI6x+ehHI46Kurh8HT6UrB0CNM6RokYKr6JWVzIdFPPMVKkxcq2KozTPf
CBu+Whh/WGFROM8JT2KGCnuz2ZBUZXDtNBJmW+ZnA+z9b7xZ1f31nio4vKKdZU+R
swpnV+0q9cs=
=qDDl
-----END PGP SIGNATURE-----

Merge tag 'erofs-for-6.10-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull more erofs updates from Gao Xiang:
 "The main ones are metadata API conversion to byte offsets by Al Viro.
  Another patch gets rid of unnecessary memory allocation out of DEFLATE
  decompressor. The remaining one is a trivial cleanup.

  - Convert metadata APIs to byte offsets

  - Avoid allocating DEFLATE streams unnecessarily

  - Some erofs_show_options() cleanup"

* tag 'erofs-for-6.10-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: avoid allocating DEFLATE streams before mounting
  z_erofs_pcluster_begin(): don't bother with rounding position down
  erofs: don't round offset down for erofs_read_metabuf()
  erofs: don't align offset for erofs_read_metabuf() (simple cases)
  erofs: mechanically convert erofs_read_metabuf() to offsets
  erofs: clean up erofs_show_options()
commit dcb9f48667
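The common thread in the diff below is that erofs_read_metabuf() now takes an absolute byte offset instead of a block number, so callers no longer split a position into erofs_blknr()/erofs_blkoff() pairs and then re-add the in-block offset by hand. A minimal userspace sketch of that calling-convention change (standalone C; the helper names and the 4KiB block size are assumptions for illustration, not the kernel API):

#include <stdint.h>
#include <stdio.h>

#define BLKSZBITS 12	/* assumed 4KiB metadata blocks */

/* old-style helper: callers pass a block number, offsets are re-derived */
static const char *read_meta_blk(const char *image, uint32_t blkaddr)
{
        return image + ((uint64_t)blkaddr << BLKSZBITS);
}

/* new-style helper: callers pass the absolute byte offset directly */
static const char *read_meta_off(const char *image, uint64_t offset)
{
        return image + offset;
}

int main(void)
{
        static char image[2 << BLKSZBITS];              /* two fake blocks */
        uint64_t pos = (1ULL << BLKSZBITS) + 42;        /* byte 42 of block 1 */

        /* old calling convention: split pos into block + in-block offset */
        const char *p1 = read_meta_blk(image, pos >> BLKSZBITS) +
                         (pos & ((1ULL << BLKSZBITS) - 1));

        /* new calling convention: hand pos over as-is */
        const char *p2 = read_meta_off(image, pos);

        printf("same byte reached: %s\n", p1 == p2 ? "yes" : "no");
        return 0;
}

The only point is that the caller-side blknr/blkoff arithmetic disappears; the real kernel helpers additionally handle buffer mapping and error pointers.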
@@ -72,10 +72,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
 }
 
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
-                         erofs_blk_t blkaddr, enum erofs_kmap_type type)
+                         erofs_off_t offset, enum erofs_kmap_type type)
 {
         erofs_init_metabuf(buf, sb);
-        return erofs_bread(buf, erofs_pos(sb, blkaddr), type);
+        return erofs_bread(buf, offset, type);
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
@@ -152,7 +152,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
         pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
                     vi->xattr_isize, unit) + unit * chunknr;
 
-        kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
+        kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
         if (IS_ERR(kaddr)) {
                 err = PTR_ERR(kaddr);
                 goto out;
@@ -163,7 +163,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 
         /* handle block map */
         if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
-                __le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
+                __le32 *blkaddr = kaddr;
 
                 if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
                         map->m_flags = 0;
@@ -174,7 +174,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
                 goto out_unlock;
         }
         /* parse chunk indexes */
-        idx = kaddr + erofs_blkoff(sb, pos);
+        idx = kaddr;
         switch (le32_to_cpu(idx->blkaddr)) {
         case EROFS_NULL_ADDR:
                 map->m_flags = 0;
@@ -294,11 +294,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 
                 iomap->type = IOMAP_INLINE;
-                ptr = erofs_read_metabuf(&buf, sb,
-                                         erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
+                ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
                 if (IS_ERR(ptr))
                         return PTR_ERR(ptr);
-                iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
+                iomap->inline_data = ptr;
                 iomap->private = buf.base;
         } else {
                 iomap->type = IOMAP_MAPPED;
@@ -46,39 +46,15 @@ int __init z_erofs_deflate_init(void)
         /* by default, use # of possible CPUs instead */
         if (!z_erofs_deflate_nstrms)
                 z_erofs_deflate_nstrms = num_possible_cpus();
-
-        for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
-             ++z_erofs_deflate_avail_strms) {
-                struct z_erofs_deflate *strm;
-
-                strm = kzalloc(sizeof(*strm), GFP_KERNEL);
-                if (!strm)
-                        goto out_failed;
-
-                /* XXX: in-kernel zlib cannot shrink windowbits currently */
-                strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
-                if (!strm->z.workspace) {
-                        kfree(strm);
-                        goto out_failed;
-                }
-
-                spin_lock(&z_erofs_deflate_lock);
-                strm->next = z_erofs_deflate_head;
-                z_erofs_deflate_head = strm;
-                spin_unlock(&z_erofs_deflate_lock);
-        }
         return 0;
-
-out_failed:
-        erofs_err(NULL, "failed to allocate zlib workspace");
-        z_erofs_deflate_exit();
-        return -ENOMEM;
 }
 
 int z_erofs_load_deflate_config(struct super_block *sb,
                                 struct erofs_super_block *dsb, void *data, int size)
 {
         struct z_erofs_deflate_cfgs *dfl = data;
+        static DEFINE_MUTEX(deflate_resize_mutex);
+        static bool inited;
 
         if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
                 erofs_err(sb, "invalid deflate cfgs, size=%u", size);
@@ -89,9 +65,36 @@ int z_erofs_load_deflate_config(struct super_block *sb,
                 erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
                 return -EOPNOTSUPP;
         }
-
+        mutex_lock(&deflate_resize_mutex);
+        if (!inited) {
+                for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+                     ++z_erofs_deflate_avail_strms) {
+                        struct z_erofs_deflate *strm;
+
+                        strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+                        if (!strm)
+                                goto failed;
+                        /* XXX: in-kernel zlib cannot customize windowbits */
+                        strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+                        if (!strm->z.workspace) {
+                                kfree(strm);
+                                goto failed;
+                        }
+
+                        spin_lock(&z_erofs_deflate_lock);
+                        strm->next = z_erofs_deflate_head;
+                        z_erofs_deflate_head = strm;
+                        spin_unlock(&z_erofs_deflate_lock);
+                }
+                inited = true;
+        }
+        mutex_unlock(&deflate_resize_mutex);
         erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
         return 0;
+failed:
+        mutex_unlock(&deflate_resize_mutex);
+        z_erofs_deflate_exit();
+        return -ENOMEM;
 }
 
 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
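The two decompressor hunks above move the zlib workspace allocation out of module init and into the first z_erofs_load_deflate_config() call, guarded by a local mutex and a one-shot flag, so kernels that never mount a DEFLATE-compressed image allocate nothing. A rough userspace analogue of that lazy, once-only setup pattern (pthread-based sketch; every name here is hypothetical and the workspace size is arbitrary):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for a preallocated decompression stream */
struct stream {
        struct stream *next;
        void *workspace;
};

static struct stream *stream_head;
static unsigned int avail_strms, wanted_strms = 4;      /* kernel: per-CPU */
static pthread_mutex_t resize_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool inited;

static void free_all_streams(void)
{
        struct stream *s;

        while ((s = stream_head) != NULL) {
                stream_head = s->next;
                free(s->workspace);
                free(s);
        }
        avail_strms = 0;
}

/* called on the first "mount" that actually needs the decompressor */
static int load_config(void)
{
        int ret = 0;

        pthread_mutex_lock(&resize_mutex);
        if (!inited) {
                for (; avail_strms < wanted_strms; ++avail_strms) {
                        struct stream *s = calloc(1, sizeof(*s));

                        if (!s || !(s->workspace = malloc(32 * 1024))) {
                                free(s);
                                free_all_streams();     /* tear down partial state */
                                ret = -1;
                                break;
                        }
                        s->next = stream_head;
                        stream_head = s;
                }
                if (!ret)
                        inited = true;  /* later loads skip the loop */
        }
        pthread_mutex_unlock(&resize_mutex);
        return ret;
}

int main(void)
{
        printf("first load:  %d (streams=%u)\n", load_config(), avail_strms);
        printf("second load: %d (streams=%u)\n", load_config(), avail_strms);
        free_all_streams();
        return 0;
}

On failure the partially built list is torn down and the flag stays false, mirroring the goto failed path in the hunk above.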
@@ -273,21 +273,15 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
         if (map.m_flags & EROFS_MAP_META) {
                 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
                 struct iov_iter iter;
-                erofs_blk_t blknr;
-                size_t offset, size;
+                size_t size = map.m_llen;
                 void *src;
 
-                /* For tail packing layout, the offset may be non-zero. */
-                offset = erofs_blkoff(sb, map.m_pa);
-                blknr = erofs_blknr(sb, map.m_pa);
-                size = map.m_llen;
-
-                src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
+                src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP);
                 if (IS_ERR(src))
                         return PTR_ERR(src);
 
                 iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
-                if (copy_to_iter(src + offset, size, &iter) != size) {
+                if (copy_to_iter(src, size, &iter) != size) {
                         erofs_put_metabuf(&buf);
                         return -EFAULT;
                 }
@@ -26,7 +26,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
         blkaddr = erofs_blknr(sb, inode_loc);
         *ofs = erofs_blkoff(sb, inode_loc);
 
-        kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
+        kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
         if (IS_ERR(kaddr)) {
                 erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
                           vi->nid, PTR_ERR(kaddr));
@@ -66,7 +66,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
                         goto err_out;
                 }
                 memcpy(copied, dic, gotten);
-                kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
+                kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr + 1),
                                            EROFS_KMAP);
                 if (IS_ERR(kaddr)) {
                         erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
@@ -64,15 +64,12 @@ enum {
 };
 
 struct erofs_mount_opts {
-#ifdef CONFIG_EROFS_FS_ZIP
         /* current strategy of how to use managed cache */
         unsigned char cache_strategy;
         /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
         unsigned int sync_decompress;
-
         /* threshold for decompression synchronously */
         unsigned int max_sync_decompress_pages;
-#endif
         unsigned int mount_opt;
 };
 
@@ -406,7 +403,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
                   enum erofs_kmap_type type);
 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
-                         erofs_blk_t blkaddr, enum erofs_kmap_type type);
+                         erofs_off_t offset, enum erofs_kmap_type type);
 int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
 int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                  u64 start, u64 len);
@@ -178,12 +178,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
         struct erofs_fscache *fscache;
         struct erofs_deviceslot *dis;
         struct file *bdev_file;
-        void *ptr;
 
-        ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
-        if (IS_ERR(ptr))
-                return PTR_ERR(ptr);
-        dis = ptr + erofs_blkoff(sb, *pos);
+        dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
+        if (IS_ERR(dis))
+                return PTR_ERR(dis);
 
         if (!sbi->devs->flatdev && !dif->path) {
                 if (!dis->tag[0]) {
@@ -943,26 +941,14 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
         struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
         struct erofs_mount_opts *opt = &sbi->opt;
 
-#ifdef CONFIG_EROFS_FS_XATTR
-        if (test_opt(opt, XATTR_USER))
-                seq_puts(seq, ",user_xattr");
-        else
-                seq_puts(seq, ",nouser_xattr");
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-        if (test_opt(opt, POSIX_ACL))
-                seq_puts(seq, ",acl");
-        else
-                seq_puts(seq, ",noacl");
-#endif
-#ifdef CONFIG_EROFS_FS_ZIP
-        if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
-                seq_puts(seq, ",cache_strategy=disabled");
-        else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
-                seq_puts(seq, ",cache_strategy=readahead");
-        else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
-                seq_puts(seq, ",cache_strategy=readaround");
-#endif
+        if (IS_ENABLED(CONFIG_EROFS_FS_XATTR))
+                seq_puts(seq, test_opt(opt, XATTR_USER) ?
+                                ",user_xattr" : ",nouser_xattr");
+        if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
+                seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl");
+        if (IS_ENABLED(CONFIG_EROFS_FS_ZIP))
+                seq_printf(seq, ",cache_strategy=%s",
+                           erofs_param_cache_strategy[opt->cache_strategy].name);
         if (test_opt(opt, DAX_ALWAYS))
                 seq_puts(seq, ",dax=always");
         if (test_opt(opt, DAX_NEVER))
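The erofs_show_options() hunk above replaces #ifdef blocks with IS_ENABLED() tests: the option-printing code is always compiled and type-checked, and the untaken branch is folded away when the feature is configured out. A self-contained sketch of that idiom, modeled on (but simpler than) the kernel's kconfig.h macros — CONFIG_DEMO_FEATURE is a made-up symbol, and the real IS_ENABLED() also accepts the =m case:

#include <stdio.h>

#define CONFIG_DEMO_FEATURE 1   /* comment this out to "configure out" the feature */

/* expands to 1 if the CONFIG_ macro is #defined to 1, to 0 otherwise */
#define __ARG_PLACEHOLDER_1             0,
#define __take_second_arg(__ignored, val, ...)  val
#define ____is_defined(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0, 0)
#define ___is_defined(val)              ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option)              ___is_defined(option)

static void show_options(void)
{
        /*
         * Both branches are always parsed and type-checked; the dead one is
         * removed by constant folding when the feature is disabled.
         */
        if (IS_ENABLED(CONFIG_DEMO_FEATURE))
                printf(",demo_feature\n");
        else
                printf(",nodemo_feature\n");
}

int main(void)
{
        show_options();
        return 0;
}

Compared with #ifdef, a typo or type error inside the disabled branch still breaks the build, which is the usual argument for preferring IS_ENABLED() in kernel code.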
@@ -868,7 +868,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         } else {
                 void *mptr;
 
-                mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+                mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
                 if (IS_ERR(mptr)) {
                         ret = PTR_ERR(mptr);
                         erofs_err(sb, "failed to get inline data %d", ret);
@@ -34,13 +34,13 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
         unsigned int advise;
 
         m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-                        erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
+                        pos, EROFS_KMAP);
         if (IS_ERR(m->kaddr))
                 return PTR_ERR(m->kaddr);
 
         m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
         m->lcn = lcn;
-        di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
+        di = m->kaddr;
 
         advise = le16_to_cpu(di->di_advise);
         m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
@@ -109,7 +109,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 {
         struct erofs_inode *const vi = EROFS_I(m->inode);
         const unsigned int lclusterbits = vi->z_logical_clusterbits;
-        unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
+        unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
         int i;
         u8 *in, type;
         bool big_pcluster;
@@ -127,11 +127,11 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
         big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
         lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
         encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
-        eofs = erofs_blkoff(m->inode->i_sb, pos);
-        base = round_down(eofs, vcnt << amortizedshift);
-        in = m->kaddr + base;
+        bytes = pos & ((vcnt << amortizedshift) - 1);
 
-        i = (eofs - base) >> amortizedshift;
+        in = m->kaddr - bytes;
+
+        i = bytes >> amortizedshift;
 
         lo = decode_compactedbits(lobits, in, encodebits * i, &type);
         m->type = type;
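In the hunk above the mapped buffer now starts at pos itself instead of at the beginning of pos's block, so the slot index comes straight from pos & (unit - 1) and the unit base is reached by stepping back those bytes. A quick standalone check that the old and the new arithmetic pick the same slot and the same unit start, under the assumption that the compacted-index unit size divides the block size (the shift and vcnt values below are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int blkszbits = 12;      /* assumed 4KiB blocks */
        const unsigned int amortizedshift = 2;  /* example value */
        const uint64_t vcnt = 16;               /* example value */
        const uint64_t blksz = 1ULL << blkszbits;
        const uint64_t unit = vcnt << amortizedshift;

        assert(blksz % unit == 0);      /* precondition for the equivalence */

        for (uint64_t pos = 0; pos < 4 * blksz; pos += 7) {
                /* old: offsets relative to the start of pos's block */
                uint64_t eofs = pos & (blksz - 1);
                uint64_t base = eofs - (eofs % unit);   /* round_down(eofs, unit) */
                uint64_t i_old = (eofs - base) >> amortizedshift;

                /* new: offsets relative to pos itself */
                uint64_t bytes = pos & (unit - 1);
                uint64_t i_new = bytes >> amortizedshift;

                assert(i_old == i_new);
                /* same absolute unit start: block start + base == pos - bytes */
                assert((pos - eofs) + base == pos - bytes);
        }
        printf("old and new compacted-index arithmetic agree\n");
        return 0;
}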
@@ -256,7 +256,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
 out:
         pos += lcn * (1 << amortizedshift);
         m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-                        erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
+                        pos, EROFS_KMAP);
         if (IS_ERR(m->kaddr))
                 return PTR_ERR(m->kaddr);
         return unpack_compacted_index(m, amortizedshift, pos, lookahead);
@@ -570,7 +570,6 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
         int err, headnr;
         erofs_off_t pos;
         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
-        void *kaddr;
         struct z_erofs_map_header *h;
 
         if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
@@ -590,13 +589,12 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
                 goto out_unlock;
 
         pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
-        kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
-        if (IS_ERR(kaddr)) {
-                err = PTR_ERR(kaddr);
+        h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
+        if (IS_ERR(h)) {
+                err = PTR_ERR(h);
                 goto out_unlock;
         }
 
-        h = kaddr + erofs_blkoff(sb, pos);
         /*
          * if the highest bit of the 8-byte map header is set, the whole file
          * is stored in the packed inode. The rest bits keeps z_fragmentoff.