overlayfs update for 6.5
Merge tag 'ovl-update-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs

Pull overlayfs update from Amir Goldstein:

 - fix two NULL pointer deref bugs (Zhihao Cheng)

 - add support for "data-only" lower layers destined to be used by
   composefs

 - port overlayfs to the new mount api (Christian Brauner)

* tag 'ovl-update-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs: (26 commits)
  ovl: add Amir as co-maintainer
  ovl: reserve ability to reconfigure mount options with new mount api
  ovl: modify layer parameter parsing
  ovl: port to new mount api
  ovl: factor out ovl_parse_options() helper
  ovl: store enum redirect_mode in config instead of a string
  ovl: pass ovl_fs to xino helpers
  ovl: clarify ovl_get_root() semantics
  ovl: negate the ofs->share_whiteout boolean
  ovl: check type and offset of struct vfsmount in ovl_entry
  ovl: implement lazy lookup of lowerdata in data-only layers
  ovl: prepare for lazy lookup of lowerdata inode
  ovl: prepare to store lowerdata redirect for lazy lowerdata lookup
  ovl: implement lookup in data-only layers
  ovl: introduce data-only lower layers
  ovl: remove unneeded goto instructions
  ovl: deduplicate lowerdata and lowerstack[]
  ovl: deduplicate lowerpath and lowerstack[]
  ovl: move ovl_entry into ovl_inode
  ovl: factor out ovl_free_entry() and ovl_stack_*() helpers
  ...
This commit is contained in:
commit be3c213150
@@ -231,12 +231,11 @@ Mount options:
     Redirects are enabled.
 - "redirect_dir=follow":
     Redirects are not created, but followed.
-- "redirect_dir=off":
-    Redirects are not created and only followed if "redirect_always_follow"
-    feature is enabled in the kernel/module config.
 - "redirect_dir=nofollow":
-    Redirects are not created and not followed (equivalent to "redirect_dir=off"
-    if "redirect_always_follow" feature is not enabled).
+    Redirects are not created and not followed.
+- "redirect_dir=off":
+    If "redirect_always_follow" is enabled in the kernel/module config,
+    this "off" translates to "follow", otherwise it translates to "nofollow".
 
 When the NFS export feature is enabled, every copied up directory is
 indexed by the file handle of the lower inode and a file handle of the
@@ -371,6 +370,41 @@ conflict with metacopy=on, and will result in an error.
 [*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
 given.
 
+
+Data-only lower layers
+----------------------
+
+With "metacopy" feature enabled, an overlayfs regular file may be a composition
+of information from up to three different layers:
+
+ 1) metadata from a file in the upper layer
+
+ 2) st_ino and st_dev object identifier from a file in a lower layer
+
+ 3) data from a file in another lower layer (further below)
+
+The "lower data" file can be on any lower layer, except from the top most
+lower layer.
+
+Below the top most lower layer, any number of lower most layers may be defined
+as "data-only" lower layers, using double colon ("::") separators.
+A normal lower layer is not allowed to be below a data-only layer, so single
+colon separators are not allowed to the right of double colon ("::") separators.
+
+
+For example:
+
+  mount -t overlay overlay -olowerdir=/l1:/l2:/l3::/do1::/do2 /merged
+
+The paths of files in the "data-only" lower layers are not visible in the
+merged overlayfs directories and the metadata and st_ino/st_dev of files
+in the "data-only" lower layers are not visible in overlayfs inodes.
+
+Only the data of the files in the "data-only" lower layers may be visible
+when a "metacopy" file in one of the lower layers above it, has a "redirect"
+to the absolute path of the "lower data" file in the "data-only" lower layer.
+
+
 Sharing and copying layers
 --------------------------
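The two documentation hunks above describe the new "data-only" lower layers and the redirect_dir modes. Since this update also ports overlayfs to the new mount api, the user-space sketch below shows how such a stack could be assembled with fsopen()/fsconfig()/fsmount() instead of a single mount(8) line. It is an illustration only, not part of the patch set: the paths (/l1 ... /merged) come from the documentation example above, everything else (the helper, the chosen option values, the minimal error handling) is an assumption, and it expects kernel/libc headers that define SYS_fsopen and friends.

/*
 * Minimal sketch: mount an overlay with data-only lower layers via the
 * new mount API.  Paths are placeholders taken from the doc example.
 */
#include <fcntl.h>          /* AT_FDCWD */
#include <linux/mount.h>    /* FSCONFIG_*, MOVE_MOUNT_F_EMPTY_PATH */
#include <stdio.h>
#include <sys/syscall.h>    /* SYS_fsopen, SYS_fsconfig, ... (assumed present) */
#include <unistd.h>

static int fsconfig_str(int fsfd, const char *key, const char *val)
{
	/* String-valued mount option, e.g. lowerdir=... */
	return syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, key, val, 0);
}

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "overlay", 0);
	if (fsfd < 0)
		return perror("fsopen"), 1;

	/* Three regular lower layers followed by two data-only ("::") layers. */
	fsconfig_str(fsfd, "lowerdir", "/l1:/l2:/l3::/do1::/do2");
	fsconfig_str(fsfd, "upperdir", "/upper");
	fsconfig_str(fsfd, "workdir", "/work");
	/* metacopy + redirects are what make data-only layers reachable. */
	fsconfig_str(fsfd, "metacopy", "on");
	fsconfig_str(fsfd, "redirect_dir", "on");

	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0)
		return perror("fsconfig create"), 1;

	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0)
		return perror("fsmount"), 1;

	if (syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/merged",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0)
		return perror("move_mount"), 1;

	return 0;
}

The same stack is expressed on one command line by the mount example in the documentation hunk above; the new mount api form simply splits it into per-option fsconfig() calls.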
@@ -15937,6 +15937,7 @@ F: include/media/i2c/ov2659.h
 
 OVERLAY FILESYSTEM
 M:	Miklos Szeredi <miklos@szeredi.hu>
+M:	Amir Goldstein <amir73il@gmail.com>
 L:	linux-unionfs@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
@@ -6,4 +6,4 @@
 obj-$(CONFIG_OVERLAY_FS) += overlay.o
 
 overlay-objs := super.o namei.o util.o inode.o file.o dir.o readdir.o \
-		copy_up.o export.o
+		copy_up.o export.o params.o
@ -575,6 +575,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
|
||||
/* Restore timestamps on parent (best effort) */
|
||||
ovl_set_timestamps(ofs, upperdir, &c->pstat);
|
||||
ovl_dentry_set_upper_alias(c->dentry);
|
||||
ovl_dentry_update_reval(c->dentry, upper);
|
||||
}
|
||||
}
|
||||
inode_unlock(udir);
|
||||
@ -894,6 +895,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
|
||||
inode_unlock(udir);
|
||||
|
||||
ovl_dentry_set_upper_alias(c->dentry);
|
||||
ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
|
||||
}
|
||||
|
||||
out:
|
||||
@ -1071,6 +1073,15 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
|
||||
if (WARN_ON(disconnected && d_is_dir(dentry)))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* We may not need lowerdata if we are only doing metacopy up, but it is
|
||||
* not very important to optimize this case, so do lazy lowerdata lookup
|
||||
* before any copy up, so we can do it before taking ovl_inode_lock().
|
||||
*/
|
||||
err = ovl_maybe_lookup_lowerdata(dentry);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
old_cred = ovl_override_creds(dentry->d_sb);
|
||||
while (!err) {
|
||||
struct dentry *next;
|
||||
|
@ -83,7 +83,7 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
|
||||
ofs->whiteout = whiteout;
|
||||
}
|
||||
|
||||
if (ofs->share_whiteout) {
|
||||
if (!ofs->no_shared_whiteout) {
|
||||
whiteout = ovl_lookup_temp(ofs, workdir);
|
||||
if (IS_ERR(whiteout))
|
||||
goto out;
|
||||
@ -95,7 +95,7 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
|
||||
if (err != -EMLINK) {
|
||||
pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n",
|
||||
ofs->whiteout->d_inode->i_nlink, err);
|
||||
ofs->share_whiteout = false;
|
||||
ofs->no_shared_whiteout = true;
|
||||
}
|
||||
dput(whiteout);
|
||||
}
|
||||
@ -269,8 +269,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
|
||||
|
||||
ovl_dir_modified(dentry->d_parent, false);
|
||||
ovl_dentry_set_upper_alias(dentry);
|
||||
ovl_dentry_update_reval(dentry, newdentry,
|
||||
DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
|
||||
ovl_dentry_init_reval(dentry, newdentry, NULL);
|
||||
|
||||
if (!hardlink) {
|
||||
/*
|
||||
@ -953,7 +952,7 @@ static bool ovl_type_merge_or_lower(struct dentry *dentry)
|
||||
|
||||
static bool ovl_can_move(struct dentry *dentry)
|
||||
{
|
||||
return ovl_redirect_dir(dentry->d_sb) ||
|
||||
return ovl_redirect_dir(OVL_FS(dentry->d_sb)) ||
|
||||
!d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry);
|
||||
}
|
||||
|
||||
|
@ -80,7 +80,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
|
||||
|
||||
/* We can get overlay root from root of any layer */
|
||||
if (dentry == dentry->d_sb->s_root)
|
||||
return oe->numlower;
|
||||
return ovl_numlower(oe);
|
||||
|
||||
/*
|
||||
* If it's an unindexed merge dir, then it's not connectable with any
|
||||
@ -91,7 +91,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
|
||||
return 0;
|
||||
|
||||
/* We can get upper/overlay path from indexed/lower dentry */
|
||||
return oe->lowerstack[0].layer->idx;
|
||||
return ovl_lowerstack(oe)->layer->idx;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -105,6 +105,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
|
||||
static int ovl_connect_layer(struct dentry *dentry)
|
||||
{
|
||||
struct dentry *next, *parent = NULL;
|
||||
struct ovl_entry *oe = OVL_E(dentry);
|
||||
int origin_layer;
|
||||
int err = 0;
|
||||
|
||||
@ -112,7 +113,7 @@ static int ovl_connect_layer(struct dentry *dentry)
|
||||
WARN_ON(!ovl_dentry_lower(dentry)))
|
||||
return -EIO;
|
||||
|
||||
origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx;
|
||||
origin_layer = ovl_lowerstack(oe)->layer->idx;
|
||||
if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry))
|
||||
return origin_layer;
|
||||
|
||||
@ -285,21 +286,29 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
|
||||
struct dentry *lower = lowerpath ? lowerpath->dentry : NULL;
|
||||
struct dentry *upper = upper_alias ?: index;
|
||||
struct dentry *dentry;
|
||||
struct inode *inode;
|
||||
struct inode *inode = NULL;
|
||||
struct ovl_entry *oe;
|
||||
struct ovl_inode_params oip = {
|
||||
.lowerpath = lowerpath,
|
||||
.index = index,
|
||||
.numlower = !!lower
|
||||
};
|
||||
|
||||
/* We get overlay directory dentries with ovl_lookup_real() */
|
||||
if (d_is_dir(upper ?: lower))
|
||||
return ERR_PTR(-EIO);
|
||||
|
||||
oe = ovl_alloc_entry(!!lower);
|
||||
if (!oe)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
oip.upperdentry = dget(upper);
|
||||
if (lower) {
|
||||
ovl_lowerstack(oe)->dentry = dget(lower);
|
||||
ovl_lowerstack(oe)->layer = lowerpath->layer;
|
||||
}
|
||||
oip.oe = oe;
|
||||
inode = ovl_get_inode(sb, &oip);
|
||||
if (IS_ERR(inode)) {
|
||||
ovl_free_entry(oe);
|
||||
dput(upper);
|
||||
return ERR_CAST(inode);
|
||||
}
|
||||
@ -314,20 +323,11 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
|
||||
dentry = d_alloc_anon(inode->i_sb);
|
||||
if (unlikely(!dentry))
|
||||
goto nomem;
|
||||
oe = ovl_alloc_entry(lower ? 1 : 0);
|
||||
if (!oe)
|
||||
goto nomem;
|
||||
|
||||
if (lower) {
|
||||
oe->lowerstack->dentry = dget(lower);
|
||||
oe->lowerstack->layer = lowerpath->layer;
|
||||
}
|
||||
dentry->d_fsdata = oe;
|
||||
if (upper_alias)
|
||||
ovl_dentry_set_upper_alias(dentry);
|
||||
|
||||
ovl_dentry_update_reval(dentry, upper,
|
||||
DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
|
||||
ovl_dentry_init_reval(dentry, upper, OVL_I_E(inode));
|
||||
|
||||
return d_instantiate_anon(dentry, inode);
|
||||
|
||||
@ -342,15 +342,16 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
|
||||
/* Get the upper or lower dentry in stack whose on layer @idx */
|
||||
static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)
|
||||
{
|
||||
struct ovl_entry *oe = dentry->d_fsdata;
|
||||
struct ovl_entry *oe = OVL_E(dentry);
|
||||
struct ovl_path *lowerstack = ovl_lowerstack(oe);
|
||||
int i;
|
||||
|
||||
if (!idx)
|
||||
return ovl_dentry_upper(dentry);
|
||||
|
||||
for (i = 0; i < oe->numlower; i++) {
|
||||
if (oe->lowerstack[i].layer->idx == idx)
|
||||
return oe->lowerstack[i].dentry;
|
||||
for (i = 0; i < ovl_numlower(oe); i++) {
|
||||
if (lowerstack[i].layer->idx == idx)
|
||||
return lowerstack[i].dentry;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
@ -107,14 +107,23 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
|
||||
{
|
||||
struct dentry *dentry = file_dentry(file);
|
||||
struct path realpath;
|
||||
int err;
|
||||
|
||||
real->flags = 0;
|
||||
real->file = file->private_data;
|
||||
|
||||
if (allow_meta)
|
||||
if (allow_meta) {
|
||||
ovl_path_real(dentry, &realpath);
|
||||
else
|
||||
} else {
|
||||
/* lazy lookup of lowerdata */
|
||||
err = ovl_maybe_lookup_lowerdata(dentry);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ovl_path_realdata(dentry, &realpath);
|
||||
}
|
||||
if (!realpath.dentry)
|
||||
return -EIO;
|
||||
|
||||
/* Has it been copied up since we'd opened it? */
|
||||
if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
|
||||
@ -150,6 +159,11 @@ static int ovl_open(struct inode *inode, struct file *file)
|
||||
struct path realpath;
|
||||
int err;
|
||||
|
||||
/* lazy lookup of lowerdata */
|
||||
err = ovl_maybe_lookup_lowerdata(dentry);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = ovl_maybe_copy_up(dentry, file->f_flags);
|
||||
if (err)
|
||||
return err;
|
||||
@ -158,6 +172,9 @@ static int ovl_open(struct inode *inode, struct file *file)
|
||||
file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
|
||||
|
||||
ovl_path_realdata(dentry, &realpath);
|
||||
if (!realpath.dentry)
|
||||
return -EIO;
|
||||
|
||||
realfile = ovl_open_realfile(file, &realpath);
|
||||
if (IS_ERR(realfile))
|
||||
return PTR_ERR(realfile);
|
||||
|
@ -97,8 +97,9 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
|
||||
|
||||
static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
|
||||
{
|
||||
bool samefs = ovl_same_fs(dentry->d_sb);
|
||||
unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
|
||||
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
|
||||
bool samefs = ovl_same_fs(ofs);
|
||||
unsigned int xinobits = ovl_xino_bits(ofs);
|
||||
unsigned int xinoshift = 64 - xinobits;
|
||||
|
||||
if (samefs) {
|
||||
@ -123,7 +124,7 @@ static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
|
||||
stat->ino |= ((u64)fsid) << (xinoshift + 1);
|
||||
stat->dev = dentry->d_sb->s_dev;
|
||||
return;
|
||||
} else if (ovl_xino_warn(dentry->d_sb)) {
|
||||
} else if (ovl_xino_warn(ofs)) {
|
||||
pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
|
||||
dentry, stat->ino, xinobits);
|
||||
}
|
||||
@ -149,7 +150,7 @@ static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
|
||||
* is unique per underlying fs, so we use the unique anonymous
|
||||
* bdev assigned to the underlying fs.
|
||||
*/
|
||||
stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
|
||||
stat->dev = ofs->fs[fsid].pseudo_dev;
|
||||
}
|
||||
}
|
||||
|
||||
@ -186,7 +187,7 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
|
||||
* If lower filesystem supports NFS file handles, this also guaranties
|
||||
* persistent st_ino across mount cycle.
|
||||
*/
|
||||
if (!is_dir || ovl_same_dev(dentry->d_sb)) {
|
||||
if (!is_dir || ovl_same_dev(OVL_FS(dentry->d_sb))) {
|
||||
if (!OVL_TYPE_UPPER(type)) {
|
||||
fsid = ovl_layer_lower(dentry)->fsid;
|
||||
} else if (OVL_TYPE_ORIGIN(type)) {
|
||||
@ -240,15 +241,22 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
|
||||
/*
|
||||
* If lower is not same as lowerdata or if there was
|
||||
* no origin on upper, we can end up here.
|
||||
* With lazy lowerdata lookup, guess lowerdata blocks
|
||||
* from size to avoid lowerdata lookup on stat(2).
|
||||
*/
|
||||
struct kstat lowerdatastat;
|
||||
u32 lowermask = STATX_BLOCKS;
|
||||
|
||||
ovl_path_lowerdata(dentry, &realpath);
|
||||
err = vfs_getattr(&realpath, &lowerdatastat,
|
||||
lowermask, flags);
|
||||
if (err)
|
||||
goto out;
|
||||
if (realpath.dentry) {
|
||||
err = vfs_getattr(&realpath, &lowerdatastat,
|
||||
lowermask, flags);
|
||||
if (err)
|
||||
goto out;
|
||||
} else {
|
||||
lowerdatastat.blocks =
|
||||
round_up(stat->size, stat->blksize) >> 9;
|
||||
}
|
||||
stat->blocks = lowerdatastat.blocks;
|
||||
}
|
||||
}
|
||||
@ -288,8 +296,8 @@ int ovl_permission(struct mnt_idmap *idmap,
|
||||
int err;
|
||||
|
||||
/* Careful in RCU walk mode */
|
||||
ovl_i_path_real(inode, &realpath);
|
||||
if (!realpath.dentry) {
|
||||
realinode = ovl_i_path_real(inode, &realpath);
|
||||
if (!realinode) {
|
||||
WARN_ON(!(mask & MAY_NOT_BLOCK));
|
||||
return -ECHILD;
|
||||
}
|
||||
@ -302,7 +310,6 @@ int ovl_permission(struct mnt_idmap *idmap,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
realinode = d_inode(realpath.dentry);
|
||||
old_cred = ovl_override_creds(inode->i_sb);
|
||||
if (!upperinode &&
|
||||
!special_file(realinode->i_mode) && mask & MAY_WRITE) {
|
||||
@ -559,20 +566,20 @@ struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
|
||||
struct inode *inode, int type,
|
||||
bool rcu, bool noperm)
|
||||
{
|
||||
struct inode *realinode = ovl_inode_real(inode);
|
||||
struct inode *realinode;
|
||||
struct posix_acl *acl;
|
||||
struct path realpath;
|
||||
|
||||
if (!IS_POSIXACL(realinode))
|
||||
return NULL;
|
||||
|
||||
/* Careful in RCU walk mode */
|
||||
ovl_i_path_real(inode, &realpath);
|
||||
if (!realpath.dentry) {
|
||||
realinode = ovl_i_path_real(inode, &realpath);
|
||||
if (!realinode) {
|
||||
WARN_ON(!rcu);
|
||||
return ERR_PTR(-ECHILD);
|
||||
}
|
||||
|
||||
if (!IS_POSIXACL(realinode))
|
||||
return NULL;
|
||||
|
||||
if (rcu) {
|
||||
/*
|
||||
* If the layer is idmapped drop out of RCU path walk
|
||||
@ -710,6 +717,9 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
struct inode *realinode = ovl_inode_realdata(inode);
|
||||
const struct cred *old_cred;
|
||||
|
||||
if (!realinode)
|
||||
return -EIO;
|
||||
|
||||
if (!realinode->i_op->fiemap)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
@ -952,7 +962,7 @@ static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
|
||||
|
||||
static void ovl_next_ino(struct inode *inode)
|
||||
{
|
||||
struct ovl_fs *ofs = inode->i_sb->s_fs_info;
|
||||
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
|
||||
|
||||
inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
|
||||
if (unlikely(!inode->i_ino))
|
||||
@ -961,7 +971,8 @@ static void ovl_next_ino(struct inode *inode)
|
||||
|
||||
static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
|
||||
{
|
||||
int xinobits = ovl_xino_bits(inode->i_sb);
|
||||
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
|
||||
int xinobits = ovl_xino_bits(ofs);
|
||||
unsigned int xinoshift = 64 - xinobits;
|
||||
|
||||
/*
|
||||
@ -972,7 +983,7 @@ static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
|
||||
* with d_ino also causes nfsd readdirplus to fail.
|
||||
*/
|
||||
inode->i_ino = ino;
|
||||
if (ovl_same_fs(inode->i_sb)) {
|
||||
if (ovl_same_fs(ofs)) {
|
||||
return;
|
||||
} else if (xinobits && likely(!(ino >> xinoshift))) {
|
||||
inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
|
||||
@ -1003,14 +1014,10 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
|
||||
struct inode *realinode;
|
||||
struct ovl_inode *oi = OVL_I(inode);
|
||||
|
||||
if (oip->upperdentry)
|
||||
oi->__upperdentry = oip->upperdentry;
|
||||
if (oip->lowerpath && oip->lowerpath->dentry) {
|
||||
oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
|
||||
oi->lowerpath.layer = oip->lowerpath->layer;
|
||||
}
|
||||
if (oip->lowerdata)
|
||||
oi->lowerdata = igrab(d_inode(oip->lowerdata));
|
||||
oi->__upperdentry = oip->upperdentry;
|
||||
oi->oe = oip->oe;
|
||||
oi->redirect = oip->redirect;
|
||||
oi->lowerdata_redirect = oip->lowerdata_redirect;
|
||||
|
||||
realinode = ovl_inode_real(inode);
|
||||
ovl_copyattr(inode);
|
||||
@ -1325,7 +1332,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
|
||||
{
|
||||
struct ovl_fs *ofs = OVL_FS(sb);
|
||||
struct dentry *upperdentry = oip->upperdentry;
|
||||
struct ovl_path *lowerpath = oip->lowerpath;
|
||||
struct ovl_path *lowerpath = ovl_lowerpath(oip->oe);
|
||||
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
|
||||
struct inode *inode;
|
||||
struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
|
||||
@ -1369,7 +1376,9 @@ struct inode *ovl_get_inode(struct super_block *sb,
|
||||
}
|
||||
|
||||
dput(upperdentry);
|
||||
ovl_free_entry(oip->oe);
|
||||
kfree(oip->redirect);
|
||||
kfree(oip->lowerdata_redirect);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1398,14 +1407,12 @@ struct inode *ovl_get_inode(struct super_block *sb,
|
||||
if (oip->index)
|
||||
ovl_set_flag(OVL_INDEX, inode);
|
||||
|
||||
OVL_I(inode)->redirect = oip->redirect;
|
||||
|
||||
if (bylower)
|
||||
ovl_set_flag(OVL_CONST_INO, inode);
|
||||
|
||||
/* Check for non-merge dir that may have whiteouts */
|
||||
if (is_dir) {
|
||||
if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
|
||||
if (((upperdentry && lowerdentry) || ovl_numlower(oip->oe) > 1) ||
|
||||
ovl_path_check_origin_xattr(ofs, &realpath)) {
|
||||
ovl_set_flag(OVL_WHITEOUTS, inode);
|
||||
}
|
||||
|
@ -14,6 +14,8 @@
|
||||
#include <linux/exportfs.h>
|
||||
#include "overlayfs.h"
|
||||
|
||||
#include "../internal.h" /* for vfs_path_lookup */
|
||||
|
||||
struct ovl_lookup_data {
|
||||
struct super_block *sb;
|
||||
struct vfsmount *mnt;
|
||||
@ -24,6 +26,8 @@ struct ovl_lookup_data {
|
||||
bool last;
|
||||
char *redirect;
|
||||
bool metacopy;
|
||||
/* Referring to last redirect xattr */
|
||||
bool absolute_redirect;
|
||||
};
|
||||
|
||||
static int ovl_check_redirect(const struct path *path, struct ovl_lookup_data *d,
|
||||
@ -33,11 +37,13 @@ static int ovl_check_redirect(const struct path *path, struct ovl_lookup_data *d
|
||||
char *buf;
|
||||
struct ovl_fs *ofs = OVL_FS(d->sb);
|
||||
|
||||
d->absolute_redirect = false;
|
||||
buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post));
|
||||
if (IS_ERR_OR_NULL(buf))
|
||||
return PTR_ERR(buf);
|
||||
|
||||
if (buf[0] == '/') {
|
||||
d->absolute_redirect = true;
|
||||
/*
|
||||
* One of the ancestor path elements in an absolute path
|
||||
* lookup in ovl_lookup_layer() could have been opaque and
|
||||
@ -349,6 +355,61 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ovl_lookup_data_layer(struct dentry *dentry, const char *redirect,
|
||||
const struct ovl_layer *layer,
|
||||
struct path *datapath)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = vfs_path_lookup(layer->mnt->mnt_root, layer->mnt, redirect,
|
||||
LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | LOOKUP_NO_XDEV,
|
||||
datapath);
|
||||
pr_debug("lookup lowerdata (%pd2, redirect=\"%s\", layer=%d, err=%i)\n",
|
||||
dentry, redirect, layer->idx, err);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = -EREMOTE;
|
||||
if (ovl_dentry_weird(datapath->dentry))
|
||||
goto out_path_put;
|
||||
|
||||
err = -ENOENT;
|
||||
/* Only regular file is acceptable as lower data */
|
||||
if (!d_is_reg(datapath->dentry))
|
||||
goto out_path_put;
|
||||
|
||||
return 0;
|
||||
|
||||
out_path_put:
|
||||
path_put(datapath);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Lookup in data-only layers by absolute redirect to layer root */
|
||||
static int ovl_lookup_data_layers(struct dentry *dentry, const char *redirect,
|
||||
struct ovl_path *lowerdata)
|
||||
{
|
||||
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
|
||||
const struct ovl_layer *layer;
|
||||
struct path datapath;
|
||||
int err = -ENOENT;
|
||||
int i;
|
||||
|
||||
layer = &ofs->layers[ofs->numlayer - ofs->numdatalayer];
|
||||
for (i = 0; i < ofs->numdatalayer; i++, layer++) {
|
||||
err = ovl_lookup_data_layer(dentry, redirect, layer, &datapath);
|
||||
if (!err) {
|
||||
mntput(datapath.mnt);
|
||||
lowerdata->dentry = datapath.dentry;
|
||||
lowerdata->layer = layer;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
|
||||
struct dentry *upperdentry, struct ovl_path **stackp)
|
||||
@ -356,7 +417,7 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
|
||||
struct dentry *origin = NULL;
|
||||
int i;
|
||||
|
||||
for (i = 1; i < ofs->numlayer; i++) {
|
||||
for (i = 1; i <= ovl_numlowerlayer(ofs); i++) {
|
||||
/*
|
||||
* If lower fs uuid is not unique among lower fs we cannot match
|
||||
* fh->uuid to layer.
|
||||
@ -790,20 +851,21 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
|
||||
*/
|
||||
int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
|
||||
{
|
||||
struct ovl_entry *oe = dentry->d_fsdata;
|
||||
struct ovl_entry *oe = OVL_E(dentry);
|
||||
struct ovl_path *lowerstack = ovl_lowerstack(oe);
|
||||
|
||||
BUG_ON(idx < 0);
|
||||
if (idx == 0) {
|
||||
ovl_path_upper(dentry, path);
|
||||
if (path->dentry)
|
||||
return oe->numlower ? 1 : -1;
|
||||
return ovl_numlower(oe) ? 1 : -1;
|
||||
idx++;
|
||||
}
|
||||
BUG_ON(idx > oe->numlower);
|
||||
path->dentry = oe->lowerstack[idx - 1].dentry;
|
||||
path->mnt = oe->lowerstack[idx - 1].layer->mnt;
|
||||
BUG_ON(idx > ovl_numlower(oe));
|
||||
path->dentry = lowerstack[idx - 1].dentry;
|
||||
path->mnt = lowerstack[idx - 1].layer->mnt;
|
||||
|
||||
return (idx < oe->numlower) ? idx + 1 : -1;
|
||||
return (idx < ovl_numlower(oe)) ? idx + 1 : -1;
|
||||
}
|
||||
|
||||
/* Fix missing 'origin' xattr */
|
||||
@ -827,14 +889,60 @@ static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry,
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Lazy lookup of lowerdata */
|
||||
int ovl_maybe_lookup_lowerdata(struct dentry *dentry)
|
||||
{
|
||||
struct inode *inode = d_inode(dentry);
|
||||
const char *redirect = ovl_lowerdata_redirect(inode);
|
||||
struct ovl_path datapath = {};
|
||||
const struct cred *old_cred;
|
||||
int err;
|
||||
|
||||
if (!redirect || ovl_dentry_lowerdata(dentry))
|
||||
return 0;
|
||||
|
||||
if (redirect[0] != '/')
|
||||
return -EIO;
|
||||
|
||||
err = ovl_inode_lock_interruptible(inode);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = 0;
|
||||
/* Someone got here before us? */
|
||||
if (ovl_dentry_lowerdata(dentry))
|
||||
goto out;
|
||||
|
||||
old_cred = ovl_override_creds(dentry->d_sb);
|
||||
err = ovl_lookup_data_layers(dentry, redirect, &datapath);
|
||||
revert_creds(old_cred);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = ovl_dentry_set_lowerdata(dentry, &datapath);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
out:
|
||||
ovl_inode_unlock(inode);
|
||||
dput(datapath.dentry);
|
||||
|
||||
return err;
|
||||
|
||||
out_err:
|
||||
pr_warn_ratelimited("lazy lowerdata lookup failed (%pd2, err=%i)\n",
|
||||
dentry, err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct ovl_entry *oe;
|
||||
struct ovl_entry *oe = NULL;
|
||||
const struct cred *old_cred;
|
||||
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
|
||||
struct ovl_entry *poe = dentry->d_parent->d_fsdata;
|
||||
struct ovl_entry *roe = dentry->d_sb->s_root->d_fsdata;
|
||||
struct ovl_entry *poe = OVL_E(dentry->d_parent);
|
||||
struct ovl_entry *roe = OVL_E(dentry->d_sb->s_root);
|
||||
struct ovl_path *stack = NULL, *origin_path = NULL;
|
||||
struct dentry *upperdir, *upperdentry = NULL;
|
||||
struct dentry *origin = NULL;
|
||||
@ -853,7 +961,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
.is_dir = false,
|
||||
.opaque = false,
|
||||
.stop = false,
|
||||
.last = ofs->config.redirect_follow ? false : !poe->numlower,
|
||||
.last = ovl_redirect_follow(ofs) ? false : !ovl_numlower(poe),
|
||||
.redirect = NULL,
|
||||
.metacopy = false,
|
||||
};
|
||||
@ -904,21 +1012,20 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
upperopaque = d.opaque;
|
||||
}
|
||||
|
||||
if (!d.stop && poe->numlower) {
|
||||
if (!d.stop && ovl_numlower(poe)) {
|
||||
err = -ENOMEM;
|
||||
stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
|
||||
GFP_KERNEL);
|
||||
stack = ovl_stack_alloc(ofs->numlayer - 1);
|
||||
if (!stack)
|
||||
goto out_put_upper;
|
||||
}
|
||||
|
||||
for (i = 0; !d.stop && i < poe->numlower; i++) {
|
||||
struct ovl_path lower = poe->lowerstack[i];
|
||||
for (i = 0; !d.stop && i < ovl_numlower(poe); i++) {
|
||||
struct ovl_path lower = ovl_lowerstack(poe)[i];
|
||||
|
||||
if (!ofs->config.redirect_follow)
|
||||
d.last = i == poe->numlower - 1;
|
||||
else
|
||||
d.last = lower.layer->idx == roe->numlower;
|
||||
if (!ovl_redirect_follow(ofs))
|
||||
d.last = i == ovl_numlower(poe) - 1;
|
||||
else if (d.is_dir || !ofs->numdatalayer)
|
||||
d.last = lower.layer->idx == ovl_numlower(roe);
|
||||
|
||||
d.mnt = lower.layer->mnt;
|
||||
err = ovl_lookup_layer(lower.dentry, &d, &this, false);
|
||||
@ -995,7 +1102,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
* this attack vector when not necessary.
|
||||
*/
|
||||
err = -EPERM;
|
||||
if (d.redirect && !ofs->config.redirect_follow) {
|
||||
if (d.redirect && !ovl_redirect_follow(ofs)) {
|
||||
pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n",
|
||||
dentry);
|
||||
goto out_put;
|
||||
@ -1011,6 +1118,12 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
}
|
||||
}
|
||||
|
||||
/* Defer lookup of lowerdata in data-only layers to first access */
|
||||
if (d.metacopy && ctr && ofs->numdatalayer && d.absolute_redirect) {
|
||||
d.metacopy = false;
|
||||
ctr++;
|
||||
}
|
||||
|
||||
/*
|
||||
* For regular non-metacopy upper dentries, there is no lower
|
||||
* path based lookup, hence ctr will be zero. If a dentry is found
|
||||
@ -1067,13 +1180,14 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
}
|
||||
}
|
||||
|
||||
oe = ovl_alloc_entry(ctr);
|
||||
err = -ENOMEM;
|
||||
if (!oe)
|
||||
goto out_put;
|
||||
if (ctr) {
|
||||
oe = ovl_alloc_entry(ctr);
|
||||
err = -ENOMEM;
|
||||
if (!oe)
|
||||
goto out_put;
|
||||
|
||||
memcpy(oe->lowerstack, stack, sizeof(struct ovl_path) * ctr);
|
||||
dentry->d_fsdata = oe;
|
||||
ovl_stack_cpy(ovl_lowerstack(oe), stack, ctr);
|
||||
}
|
||||
|
||||
if (upperopaque)
|
||||
ovl_dentry_set_opaque(dentry);
|
||||
@ -1106,14 +1220,16 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
if (upperdentry || ctr) {
|
||||
struct ovl_inode_params oip = {
|
||||
.upperdentry = upperdentry,
|
||||
.lowerpath = stack,
|
||||
.oe = oe,
|
||||
.index = index,
|
||||
.numlower = ctr,
|
||||
.redirect = upperredirect,
|
||||
.lowerdata = (ctr > 1 && !d.is_dir) ?
|
||||
stack[ctr - 1].dentry : NULL,
|
||||
};
|
||||
|
||||
/* Store lowerdata redirect for lazy lookup */
|
||||
if (ctr > 1 && !d.is_dir && !stack[ctr - 1].dentry) {
|
||||
oip.lowerdata_redirect = d.redirect;
|
||||
d.redirect = NULL;
|
||||
}
|
||||
inode = ovl_get_inode(dentry->d_sb, &oip);
|
||||
err = PTR_ERR(inode);
|
||||
if (IS_ERR(inode))
|
||||
@ -1122,8 +1238,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
ovl_set_flag(OVL_UPPERDATA, inode);
|
||||
}
|
||||
|
||||
ovl_dentry_update_reval(dentry, upperdentry,
|
||||
DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
|
||||
ovl_dentry_init_reval(dentry, upperdentry, OVL_I_E(inode));
|
||||
|
||||
revert_creds(old_cred);
|
||||
if (origin_path) {
|
||||
@ -1131,18 +1246,15 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
kfree(origin_path);
|
||||
}
|
||||
dput(index);
|
||||
kfree(stack);
|
||||
ovl_stack_free(stack, ctr);
|
||||
kfree(d.redirect);
|
||||
return d_splice_alias(inode, dentry);
|
||||
|
||||
out_free_oe:
|
||||
dentry->d_fsdata = NULL;
|
||||
kfree(oe);
|
||||
ovl_free_entry(oe);
|
||||
out_put:
|
||||
dput(index);
|
||||
for (i = 0; i < ctr; i++)
|
||||
dput(stack[i].dentry);
|
||||
kfree(stack);
|
||||
ovl_stack_free(stack, ctr);
|
||||
out_put_upper:
|
||||
if (origin_path) {
|
||||
dput(origin_path->dentry);
|
||||
@ -1158,7 +1270,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
|
||||
bool ovl_lower_positive(struct dentry *dentry)
|
||||
{
|
||||
struct ovl_entry *poe = dentry->d_parent->d_fsdata;
|
||||
struct ovl_entry *poe = OVL_E(dentry->d_parent);
|
||||
const struct qstr *name = &dentry->d_name;
|
||||
const struct cred *old_cred;
|
||||
unsigned int i;
|
||||
@ -1178,12 +1290,13 @@ bool ovl_lower_positive(struct dentry *dentry)
|
||||
|
||||
old_cred = ovl_override_creds(dentry->d_sb);
|
||||
/* Positive upper -> have to look up lower to see whether it exists */
|
||||
for (i = 0; !done && !positive && i < poe->numlower; i++) {
|
||||
for (i = 0; !done && !positive && i < ovl_numlower(poe); i++) {
|
||||
struct dentry *this;
|
||||
struct dentry *lowerdir = poe->lowerstack[i].dentry;
|
||||
struct ovl_path *parentpath = &ovl_lowerstack(poe)[i];
|
||||
|
||||
this = lookup_one_positive_unlocked(mnt_idmap(poe->lowerstack[i].layer->mnt),
|
||||
name->name, lowerdir, name->len);
|
||||
this = lookup_one_positive_unlocked(
|
||||
mnt_idmap(parentpath->layer->mnt),
|
||||
name->name, parentpath->dentry, name->len);
|
||||
if (IS_ERR(this)) {
|
||||
switch (PTR_ERR(this)) {
|
||||
case -ENOENT:
|
||||
|
@ -57,12 +57,27 @@ enum ovl_entry_flag {
|
||||
OVL_E_CONNECTED,
|
||||
};
|
||||
|
||||
enum {
|
||||
OVL_REDIRECT_OFF, /* "off" mode is never used. In effect */
|
||||
OVL_REDIRECT_FOLLOW, /* ...it translates to either "follow" */
|
||||
OVL_REDIRECT_NOFOLLOW, /* ...or "nofollow". */
|
||||
OVL_REDIRECT_ON,
|
||||
};
|
||||
|
||||
enum {
|
||||
OVL_XINO_OFF,
|
||||
OVL_XINO_AUTO,
|
||||
OVL_XINO_ON,
|
||||
};
|
||||
|
||||
/* The set of options that user requested explicitly via mount options */
|
||||
struct ovl_opt_set {
|
||||
bool metacopy;
|
||||
bool redirect;
|
||||
bool nfs_export;
|
||||
bool index;
|
||||
};
|
||||
|
||||
/*
|
||||
* The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
|
||||
* where:
|
||||
@ -353,17 +368,29 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
|
||||
return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
|
||||
}
|
||||
|
||||
static inline bool ovl_allow_offline_changes(struct ovl_fs *ofs)
|
||||
{
|
||||
/*
|
||||
* To avoid regressions in existing setups with overlay lower offline
|
||||
* changes, we allow lower changes only if none of the new features
|
||||
* are used.
|
||||
*/
|
||||
return (!ofs->config.index && !ofs->config.metacopy &&
|
||||
!ofs->config.redirect_dir && ofs->config.xino != OVL_XINO_ON);
|
||||
}
|
||||
|
||||
/* params.c */
|
||||
#define OVL_MAX_STACK 500
|
||||
|
||||
struct ovl_fs_context_layer {
|
||||
char *name;
|
||||
struct path path;
|
||||
};
|
||||
|
||||
struct ovl_fs_context {
|
||||
struct path upper;
|
||||
struct path work;
|
||||
size_t capacity;
|
||||
size_t nr; /* includes nr_data */
|
||||
size_t nr_data;
|
||||
struct ovl_opt_set set;
|
||||
struct ovl_fs_context_layer *lower;
|
||||
};
|
||||
|
||||
int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
|
||||
bool workdir);
|
||||
int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc);
|
||||
void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx);
|
||||
|
||||
/* util.c */
|
||||
int ovl_want_write(struct dentry *dentry);
|
||||
@ -374,21 +401,30 @@ int ovl_can_decode_fh(struct super_block *sb);
|
||||
struct dentry *ovl_indexdir(struct super_block *sb);
|
||||
bool ovl_index_all(struct super_block *sb);
|
||||
bool ovl_verify_lower(struct super_block *sb);
|
||||
struct ovl_path *ovl_stack_alloc(unsigned int n);
|
||||
void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n);
|
||||
void ovl_stack_put(struct ovl_path *stack, unsigned int n);
|
||||
void ovl_stack_free(struct ovl_path *stack, unsigned int n);
|
||||
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
|
||||
void ovl_free_entry(struct ovl_entry *oe);
|
||||
bool ovl_dentry_remote(struct dentry *dentry);
|
||||
void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
|
||||
unsigned int mask);
|
||||
void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry);
|
||||
void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry,
|
||||
struct ovl_entry *oe);
|
||||
void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
|
||||
struct ovl_entry *oe, unsigned int mask);
|
||||
bool ovl_dentry_weird(struct dentry *dentry);
|
||||
enum ovl_path_type ovl_path_type(struct dentry *dentry);
|
||||
void ovl_path_upper(struct dentry *dentry, struct path *path);
|
||||
void ovl_path_lower(struct dentry *dentry, struct path *path);
|
||||
void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
|
||||
void ovl_i_path_real(struct inode *inode, struct path *path);
|
||||
struct inode *ovl_i_path_real(struct inode *inode, struct path *path);
|
||||
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
|
||||
enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path);
|
||||
struct dentry *ovl_dentry_upper(struct dentry *dentry);
|
||||
struct dentry *ovl_dentry_lower(struct dentry *dentry);
|
||||
struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
|
||||
int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath);
|
||||
const struct ovl_layer *ovl_i_layer_lower(struct inode *inode);
|
||||
const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
|
||||
struct dentry *ovl_dentry_real(struct dentry *dentry);
|
||||
@ -398,6 +434,7 @@ struct inode *ovl_inode_lower(struct inode *inode);
|
||||
struct inode *ovl_inode_lowerdata(struct inode *inode);
|
||||
struct inode *ovl_inode_real(struct inode *inode);
|
||||
struct inode *ovl_inode_realdata(struct inode *inode);
|
||||
const char *ovl_lowerdata_redirect(struct inode *inode);
|
||||
struct ovl_dir_cache *ovl_dir_cache(struct inode *inode);
|
||||
void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache);
|
||||
void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry);
|
||||
@ -412,7 +449,6 @@ bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
|
||||
bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags);
|
||||
bool ovl_has_upperdata(struct inode *inode);
|
||||
void ovl_set_upperdata(struct inode *inode);
|
||||
bool ovl_redirect_dir(struct super_block *sb);
|
||||
const char *ovl_dentry_get_redirect(struct dentry *dentry);
|
||||
void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
|
||||
void ovl_inode_update(struct inode *inode, struct dentry *upperdentry);
|
||||
@ -480,31 +516,51 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
|
||||
return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
|
||||
}
|
||||
|
||||
static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
|
||||
{
|
||||
return ofs->config.redirect_mode != OVL_REDIRECT_NOFOLLOW;
|
||||
}
|
||||
|
||||
static inline bool ovl_redirect_dir(struct ovl_fs *ofs)
|
||||
{
|
||||
return ofs->config.redirect_mode == OVL_REDIRECT_ON;
|
||||
}
|
||||
|
||||
/*
|
||||
* With xino=auto, we do best effort to keep all inodes on same st_dev and
|
||||
* d_ino consistent with st_ino.
|
||||
* With xino=on, we do the same effort but we warn if we failed.
|
||||
*/
|
||||
static inline bool ovl_xino_warn(struct super_block *sb)
|
||||
static inline bool ovl_xino_warn(struct ovl_fs *ofs)
|
||||
{
|
||||
return OVL_FS(sb)->config.xino == OVL_XINO_ON;
|
||||
return ofs->config.xino == OVL_XINO_ON;
|
||||
}
|
||||
|
||||
/*
|
||||
* To avoid regressions in existing setups with overlay lower offline changes,
|
||||
* we allow lower changes only if none of the new features are used.
|
||||
*/
|
||||
static inline bool ovl_allow_offline_changes(struct ovl_fs *ofs)
|
||||
{
|
||||
return (!ofs->config.index && !ofs->config.metacopy &&
|
||||
!ovl_redirect_dir(ofs) && !ovl_xino_warn(ofs));
|
||||
}
|
||||
|
||||
/* All layers on same fs? */
|
||||
static inline bool ovl_same_fs(struct super_block *sb)
|
||||
static inline bool ovl_same_fs(struct ovl_fs *ofs)
|
||||
{
|
||||
return OVL_FS(sb)->xino_mode == 0;
|
||||
return ofs->xino_mode == 0;
|
||||
}
|
||||
|
||||
/* All overlay inodes have same st_dev? */
|
||||
static inline bool ovl_same_dev(struct super_block *sb)
|
||||
static inline bool ovl_same_dev(struct ovl_fs *ofs)
|
||||
{
|
||||
return OVL_FS(sb)->xino_mode >= 0;
|
||||
return ofs->xino_mode >= 0;
|
||||
}
|
||||
|
||||
static inline unsigned int ovl_xino_bits(struct super_block *sb)
|
||||
static inline unsigned int ovl_xino_bits(struct ovl_fs *ofs)
|
||||
{
|
||||
return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
|
||||
return ovl_same_dev(ofs) ? ofs->xino_mode : 0;
|
||||
}
|
||||
|
||||
static inline void ovl_inode_lock(struct inode *inode)
|
||||
@ -550,6 +606,7 @@ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
|
||||
struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
|
||||
struct dentry *origin, bool verify);
|
||||
int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
|
||||
int ovl_maybe_lookup_lowerdata(struct dentry *dentry);
|
||||
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
|
||||
unsigned int flags);
|
||||
bool ovl_lower_positive(struct dentry *dentry);
|
||||
@ -646,11 +703,10 @@ bool ovl_is_private_xattr(struct super_block *sb, const char *name);
|
||||
struct ovl_inode_params {
|
||||
struct inode *newinode;
|
||||
struct dentry *upperdentry;
|
||||
struct ovl_path *lowerpath;
|
||||
struct ovl_entry *oe;
|
||||
bool index;
|
||||
unsigned int numlower;
|
||||
char *redirect;
|
||||
struct dentry *lowerdata;
|
||||
char *lowerdata_redirect;
|
||||
};
|
||||
void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
|
||||
unsigned long ino, int fsid);
|
||||
|
@ -6,13 +6,10 @@
|
||||
*/
|
||||
|
||||
struct ovl_config {
|
||||
char *lowerdir;
|
||||
char *upperdir;
|
||||
char *workdir;
|
||||
bool default_permissions;
|
||||
bool redirect_dir;
|
||||
bool redirect_follow;
|
||||
const char *redirect_mode;
|
||||
int redirect_mode;
|
||||
bool index;
|
||||
bool uuid;
|
||||
bool nfs_export;
|
||||
@ -32,6 +29,7 @@ struct ovl_sb {
|
||||
};
|
||||
|
||||
struct ovl_layer {
|
||||
/* ovl_free_fs() relies on @mnt being the first member! */
|
||||
struct vfsmount *mnt;
|
||||
/* Trap in ovl inode cache */
|
||||
struct inode *trap;
|
||||
@ -40,18 +38,34 @@ struct ovl_layer {
|
||||
int idx;
|
||||
/* One fsid per unique underlying sb (upper fsid == 0) */
|
||||
int fsid;
|
||||
char *name;
|
||||
};
|
||||
|
||||
/*
|
||||
* ovl_free_fs() relies on @mnt being the first member when unmounting
|
||||
* the private mounts created for each layer. Let's check both the
|
||||
* offset and type.
|
||||
*/
|
||||
static_assert(offsetof(struct ovl_layer, mnt) == 0);
|
||||
static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
|
||||
|
||||
struct ovl_path {
|
||||
const struct ovl_layer *layer;
|
||||
struct dentry *dentry;
|
||||
};
|
||||
|
||||
struct ovl_entry {
|
||||
unsigned int __numlower;
|
||||
struct ovl_path __lowerstack[];
|
||||
};
|
||||
|
||||
/* private information held for overlayfs's superblock */
|
||||
struct ovl_fs {
|
||||
unsigned int numlayer;
|
||||
/* Number of unique fs among layers including upper fs */
|
||||
unsigned int numfs;
|
||||
/* Number of data-only lower layers */
|
||||
unsigned int numdatalayer;
|
||||
const struct ovl_layer *layers;
|
||||
struct ovl_sb *fs;
|
||||
/* workbasedir is the path at workdir= mount option */
|
||||
@ -70,7 +84,6 @@ struct ovl_fs {
|
||||
/* Did we take the inuse lock? */
|
||||
bool upperdir_locked;
|
||||
bool workdir_locked;
|
||||
bool share_whiteout;
|
||||
/* Traps in ovl inode cache */
|
||||
struct inode *workbasedir_trap;
|
||||
struct inode *workdir_trap;
|
||||
@ -79,12 +92,19 @@ struct ovl_fs {
|
||||
int xino_mode;
|
||||
/* For allocation of non-persistent inode numbers */
|
||||
atomic_long_t last_ino;
|
||||
/* Whiteout dentry cache */
|
||||
/* Shared whiteout cache */
|
||||
struct dentry *whiteout;
|
||||
bool no_shared_whiteout;
|
||||
/* r/o snapshot of upperdir sb's only taken on volatile mounts */
|
||||
errseq_t errseq;
|
||||
};
|
||||
|
||||
/* Number of lower layers, not including data-only layers */
|
||||
static inline unsigned int ovl_numlowerlayer(struct ovl_fs *ofs)
|
||||
{
|
||||
return ofs->numlayer - ofs->numdatalayer - 1;
|
||||
}
|
||||
|
||||
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
|
||||
{
|
||||
return ofs->layers[0].mnt;
|
||||
@ -105,36 +125,53 @@ static inline bool ovl_should_sync(struct ovl_fs *ofs)
|
||||
return !ofs->config.ovl_volatile;
|
||||
}
|
||||
|
||||
/* private information held for every overlayfs dentry */
|
||||
struct ovl_entry {
|
||||
union {
|
||||
struct {
|
||||
unsigned long flags;
|
||||
};
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
unsigned numlower;
|
||||
struct ovl_path lowerstack[];
|
||||
};
|
||||
|
||||
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
|
||||
|
||||
static inline struct ovl_entry *OVL_E(struct dentry *dentry)
|
||||
static inline unsigned int ovl_numlower(struct ovl_entry *oe)
|
||||
{
|
||||
return (struct ovl_entry *) dentry->d_fsdata;
|
||||
return oe ? oe->__numlower : 0;
|
||||
}
|
||||
|
||||
static inline struct ovl_path *ovl_lowerstack(struct ovl_entry *oe)
|
||||
{
|
||||
return ovl_numlower(oe) ? oe->__lowerstack : NULL;
|
||||
}
|
||||
|
||||
static inline struct ovl_path *ovl_lowerpath(struct ovl_entry *oe)
|
||||
{
|
||||
return ovl_lowerstack(oe);
|
||||
}
|
||||
|
||||
static inline struct ovl_path *ovl_lowerdata(struct ovl_entry *oe)
|
||||
{
|
||||
struct ovl_path *lowerstack = ovl_lowerstack(oe);
|
||||
|
||||
return lowerstack ? &lowerstack[oe->__numlower - 1] : NULL;
|
||||
}
|
||||
|
||||
/* May return NULL if lazy lookup of lowerdata is needed */
|
||||
static inline struct dentry *ovl_lowerdata_dentry(struct ovl_entry *oe)
|
||||
{
|
||||
struct ovl_path *lowerdata = ovl_lowerdata(oe);
|
||||
|
||||
return lowerdata ? READ_ONCE(lowerdata->dentry) : NULL;
|
||||
}
|
||||
|
||||
/* private information held for every overlayfs dentry */
|
||||
static inline unsigned long *OVL_E_FLAGS(struct dentry *dentry)
|
||||
{
|
||||
return (unsigned long *) &dentry->d_fsdata;
|
||||
}
|
||||
|
||||
struct ovl_inode {
|
||||
union {
|
||||
struct ovl_dir_cache *cache; /* directory */
|
||||
struct inode *lowerdata; /* regular file */
|
||||
const char *lowerdata_redirect; /* regular file */
|
||||
};
|
||||
const char *redirect;
|
||||
u64 version;
|
||||
unsigned long flags;
|
||||
struct inode vfs_inode;
|
||||
struct dentry *__upperdentry;
|
||||
struct ovl_path lowerpath;
|
||||
struct ovl_entry *oe;
|
||||
|
||||
/* synchronize copy up and more */
|
||||
struct mutex lock;
|
||||
@ -145,6 +182,16 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
|
||||
return container_of(inode, struct ovl_inode, vfs_inode);
|
||||
}
|
||||
|
||||
static inline struct ovl_entry *OVL_I_E(struct inode *inode)
|
||||
{
|
||||
return inode ? OVL_I(inode)->oe : NULL;
|
||||
}
|
||||
|
||||
static inline struct ovl_entry *OVL_E(struct dentry *dentry)
|
||||
{
|
||||
return OVL_I_E(d_inode(dentry));
|
||||
}
|
||||
|
||||
static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
|
||||
{
|
||||
return READ_ONCE(oi->__upperdentry);
|
||||
|
fs/overlayfs/params.c (new file, 389 lines)
@@ -0,0 +1,389 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/fs_context.h>
|
||||
#include <linux/fs_parser.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
#include <linux/xattr.h>
|
||||
#include "overlayfs.h"
|
||||
|
||||
static ssize_t ovl_parse_param_split_lowerdirs(char *str)
|
||||
{
|
||||
ssize_t nr_layers = 1, nr_colons = 0;
|
||||
char *s, *d;
|
||||
|
||||
for (s = d = str;; s++, d++) {
|
||||
if (*s == '\\') {
|
||||
s++;
|
||||
} else if (*s == ':') {
|
||||
bool next_colon = (*(s + 1) == ':');
|
||||
|
||||
nr_colons++;
|
||||
if (nr_colons == 2 && next_colon) {
|
||||
pr_err("only single ':' or double '::' sequences of unescaped colons in lowerdir mount option allowed.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* count layers, not colons */
|
||||
if (!next_colon)
|
||||
nr_layers++;
|
||||
|
||||
*d = '\0';
|
||||
continue;
|
||||
}
|
||||
|
||||
*d = *s;
|
||||
if (!*s) {
|
||||
/* trailing colons */
|
||||
if (nr_colons) {
|
||||
pr_err("unescaped trailing colons in lowerdir mount option.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
nr_colons = 0;
|
||||
}
|
||||
|
||||
return nr_layers;
|
||||
}
|
||||
|
||||
static int ovl_mount_dir_noesc(const char *name, struct path *path)
|
||||
{
|
||||
int err = -EINVAL;
|
||||
|
||||
if (!*name) {
|
||||
pr_err("empty lowerdir\n");
|
||||
goto out;
|
||||
}
|
||||
err = kern_path(name, LOOKUP_FOLLOW, path);
|
||||
if (err) {
|
||||
pr_err("failed to resolve '%s': %i\n", name, err);
|
||||
goto out;
|
||||
}
|
||||
err = -EINVAL;
|
||||
if (ovl_dentry_weird(path->dentry)) {
|
||||
pr_err("filesystem on '%s' not supported\n", name);
|
||||
goto out_put;
|
||||
}
|
||||
if (!d_is_dir(path->dentry)) {
|
||||
pr_err("'%s' not a directory\n", name);
|
||||
goto out_put;
|
||||
}
|
||||
return 0;
|
||||
|
||||
out_put:
|
||||
path_put_init(path);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ovl_unescape(char *s)
|
||||
{
|
||||
char *d = s;
|
||||
|
||||
for (;; s++, d++) {
|
||||
if (*s == '\\')
|
||||
s++;
|
||||
*d = *s;
|
||||
if (!*s)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int ovl_mount_dir(const char *name, struct path *path)
|
||||
{
|
||||
int err = -ENOMEM;
|
||||
char *tmp = kstrdup(name, GFP_KERNEL);
|
||||
|
||||
if (tmp) {
|
||||
ovl_unescape(tmp);
|
||||
err = ovl_mount_dir_noesc(tmp, path);
|
||||
|
||||
if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
|
||||
pr_err("filesystem on '%s' not supported as upperdir\n",
|
||||
tmp);
|
||||
path_put_init(path);
|
||||
err = -EINVAL;
|
||||
}
|
||||
kfree(tmp);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
|
||||
bool workdir)
|
||||
{
|
||||
int err;
|
||||
struct ovl_fs *ofs = fc->s_fs_info;
|
||||
struct ovl_config *config = &ofs->config;
|
||||
struct ovl_fs_context *ctx = fc->fs_private;
|
||||
struct path path;
|
||||
char *dup;
|
||||
|
||||
err = ovl_mount_dir(name, &path);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* Check whether upper path is read-only here to report failures
|
||||
* early. Don't forget to recheck when the superblock is created
|
||||
* as the mount attributes could change.
|
||||
*/
|
||||
if (__mnt_is_readonly(path.mnt)) {
|
||||
path_put(&path);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dup = kstrdup(name, GFP_KERNEL);
|
||||
if (!dup) {
|
||||
path_put(&path);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (workdir) {
|
||||
kfree(config->workdir);
|
||||
config->workdir = dup;
|
||||
path_put(&ctx->work);
|
||||
ctx->work = path;
|
||||
} else {
|
||||
kfree(config->upperdir);
|
||||
config->upperdir = dup;
|
||||
path_put(&ctx->upper);
|
||||
ctx->upper = path;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
|
||||
{
|
||||
for (size_t nr = 0; nr < ctx->nr; nr++) {
|
||||
path_put(&ctx->lower[nr].path);
|
||||
kfree(ctx->lower[nr].name);
|
||||
ctx->lower[nr].name = NULL;
|
||||
}
|
||||
ctx->nr = 0;
|
||||
ctx->nr_data = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse lowerdir= mount option:
|
||||
*
|
||||
* (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2
|
||||
* Set "/lower1", "/lower2", and "/lower3" as lower layers and
|
||||
* "/data1" and "/data2" as data lower layers. Any existing lower
|
||||
* layers are replaced.
|
||||
* (2) lowerdir=:/lower4
|
||||
* Append "/lower4" to current stack of lower layers. This requires
|
||||
* that there already is at least one lower layer configured.
|
||||
* (3) lowerdir=::/lower5
|
||||
* Append data "/lower5" as data lower layer. This requires that
|
||||
* there's at least one regular lower layer present.
|
||||
*/
|
||||
int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
|
||||
{
|
||||
int err;
|
||||
struct ovl_fs_context *ctx = fc->fs_private;
|
||||
struct ovl_fs_context_layer *l;
|
||||
char *dup = NULL, *dup_iter;
|
||||
ssize_t nr_lower = 0, nr = 0, nr_data = 0;
|
||||
bool append = false, data_layer = false;
|
||||
|
||||
/*
|
||||
* Ensure we're backwards compatible with mount(2)
|
||||
* by allowing relative paths.
|
||||
*/
|
||||
|
||||
/* drop all existing lower layers */
|
||||
if (!*name) {
|
||||
ovl_parse_param_drop_lowerdir(ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
	if (strncmp(name, "::", 2) == 0) {
		/*
		 * This is a data layer.
		 * There must be at least one regular lower layer
		 * specified.
		 */
		if (ctx->nr == 0) {
			pr_err("data lower layers without regular lower layers not allowed");
			return -EINVAL;
		}

		/* Skip the leading "::". */
		name += 2;
		data_layer = true;
		/*
		 * A data layer is automatically an append as there
		 * must've been at least one regular lower layer.
		 */
		append = true;
	} else if (*name == ':') {
		/*
		 * This is a regular lower layer.
		 * If users want to append a layer enforce that they
		 * have already specified a first layer before. It's
		 * better to be strict.
		 */
		if (ctx->nr == 0) {
			pr_err("cannot append layer if no previous layer has been specified");
			return -EINVAL;
		}

		/*
		 * Once a sequence of data layers has started regular
		 * lower layers are forbidden.
		 */
		if (ctx->nr_data > 0) {
			pr_err("regular lower layers cannot follow data lower layers");
			return -EINVAL;
		}

		/* Skip the leading ":". */
		name++;
		append = true;
	}

	dup = kstrdup(name, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;

	err = -EINVAL;
	nr_lower = ovl_parse_param_split_lowerdirs(dup);
	if (nr_lower < 0)
		goto out_err;

	if ((nr_lower > OVL_MAX_STACK) ||
	    (append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) {
		pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK);
		goto out_err;
	}

	if (!append)
		ovl_parse_param_drop_lowerdir(ctx);

	/*
	 * (1) append
	 *
	 * We want nr <= nr_lower <= capacity We know nr > 0 and nr <=
	 * capacity. If nr == 0 this wouldn't be append. If nr +
	 * nr_lower is <= capacity then nr <= nr_lower <= capacity
	 * already holds. If nr + nr_lower exceeds capacity, we realloc.
	 *
	 * (2) replace
	 *
	 * Ensure we're backwards compatible with mount(2) which allows
	 * "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last
	 * specified lowerdir mount option to win.
	 *
	 * We want nr <= nr_lower <= capacity We know either (i) nr == 0
	 * or (ii) nr > 0. We also know nr_lower > 0. The capacity
	 * could've been changed multiple times already so we only know
	 * nr <= capacity. If nr + nr_lower > capacity we realloc,
	 * otherwise nr <= nr_lower <= capacity holds already.
	 */
	nr_lower += ctx->nr;
	if (nr_lower > ctx->capacity) {
		err = -ENOMEM;
		l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
				   GFP_KERNEL_ACCOUNT);
		if (!l)
			goto out_err;

		ctx->lower = l;
		ctx->capacity = nr_lower;
	}

	/*
	 * (3) By (1) and (2) we know nr <= nr_lower <= capacity.
	 * (4) If ctx->nr == 0 => replace
	 *     We have verified above that the lowerdir mount option
	 *     isn't an append, i.e., the lowerdir mount option
	 *     doesn't start with ":" or "::".
	 * (4.1) The lowerdir mount options only contains regular lower
	 *       layers ":".
	 *       => Nothing to verify.
	 * (4.2) The lowerdir mount options contains regular ":" and
	 *       data "::" layers.
	 *       => We need to verify that data lower layers "::" aren't
	 *          followed by regular ":" lower layers
	 * (5) If ctx->nr > 0 => append
	 *     We know that there's at least one regular layer
	 *     otherwise we would've failed when parsing the previous
	 *     lowerdir mount option.
	 * (5.1) The lowerdir mount option is a regular layer ":" append
	 *       => We need to verify that no data layers have been
	 *          specified before.
	 * (5.2) The lowerdir mount option is a data layer "::" append
	 *       We know that there's at least one regular layer or
	 *       other data layers. => There's nothing to verify.
	 */
	dup_iter = dup;
	for (nr = ctx->nr; nr < nr_lower; nr++) {
		l = &ctx->lower[nr];
		memset(l, 0, sizeof(*l));

		err = ovl_mount_dir_noesc(dup_iter, &l->path);
		if (err)
			goto out_put;

		err = -ENOMEM;
		l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT);
		if (!l->name)
			goto out_put;

		if (data_layer)
			nr_data++;

		/* Calling strchr() again would overrun. */
		if ((nr + 1) == nr_lower)
			break;

		err = -EINVAL;
		dup_iter = strchr(dup_iter, '\0') + 1;
		if (*dup_iter) {
			/*
			 * This is a regular layer so we require that
			 * there are no data layers.
			 */
			if ((ctx->nr_data + nr_data) > 0) {
				pr_err("regular lower layers cannot follow data lower layers");
				goto out_put;
			}

			data_layer = false;
			continue;
		}

		/* This is a data lower layer. */
		data_layer = true;
		dup_iter++;
	}
	ctx->nr = nr_lower;
	ctx->nr_data += nr_data;
	kfree(dup);
	return 0;

out_put:
	/*
	 * We know nr >= ctx->nr < nr_lower. If we failed somewhere
	 * we want to undo until nr == ctx->nr. This is correct for
	 * both ctx->nr == 0 and ctx->nr > 0.
	 */
	for (; nr >= ctx->nr; nr--) {
		l = &ctx->lower[nr];
		kfree(l->name);
		l->name = NULL;
		path_put(&l->path);

		/* don't overflow */
		if (nr == 0)
			break;
	}

out_err:
	kfree(dup);

	/* Intentionally don't realloc to a smaller size. */
	return err;
}
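The parser above accepts regular lower layers separated by ":", data-only lower layers introduced by "::", and appending to a previously given lowerdir with a leading ":" or "::". A rough userspace sketch of feeding it such a string through mount(2); the paths and option mix are hypothetical and shown only to illustrate the accepted syntax:

    /*
     * Hypothetical illustration: two regular lower layers plus one
     * data-only layer ("::"), which the parser above requires to come
     * after at least one regular layer. Data-only layers are intended
     * to be used together with the metacopy feature.
     */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        const char *opts = "lowerdir=/lower1:/lower2::/data1,"
                           "upperdir=/upper,workdir=/work,metacopy=on";

        if (mount("overlay", "/merged", "overlay", 0, opts)) {
            perror("mount");
            return 1;
        }
        return 0;
    }

A string such as "lowerdir=::/data1" with no regular layer in front would be rejected with the "data lower layers without regular lower layers not allowed" error seen above.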
@@ -118,7 +118,7 @@ static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
 		return false;
 
 	/* Always recalc d_ino when remapping lower inode numbers */
-	if (ovl_xino_bits(rdd->dentry->d_sb))
+	if (ovl_xino_bits(OVL_FS(rdd->dentry->d_sb)))
 		return true;
 
 	/* Always recalc d_ino for parent */
@@ -460,13 +460,14 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
 {
 	struct dentry *dir = path->dentry;
+	struct ovl_fs *ofs = OVL_FS(dir->d_sb);
 	struct dentry *this = NULL;
 	enum ovl_path_type type;
 	u64 ino = p->real_ino;
-	int xinobits = ovl_xino_bits(dir->d_sb);
+	int xinobits = ovl_xino_bits(ofs);
 	int err = 0;
 
-	if (!ovl_same_dev(dir->d_sb))
+	if (!ovl_same_dev(ofs))
 		goto out;
 
 	if (p->name[0] == '.') {
@@ -515,7 +516,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
 		ino = ovl_remap_lower_ino(ino, xinobits,
 					  ovl_layer_lower(this)->fsid,
 					  p->name, p->len,
-					  ovl_xino_warn(dir->d_sb));
+					  ovl_xino_warn(ofs));
 	}
 
 out:
@@ -694,12 +695,13 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
 	int err;
 	struct ovl_dir_file *od = file->private_data;
 	struct dentry *dir = file->f_path.dentry;
+	struct ovl_fs *ofs = OVL_FS(dir->d_sb);
 	const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
 	struct ovl_readdir_translate rdt = {
 		.ctx.actor = ovl_fill_real,
 		.orig_ctx = ctx,
-		.xinobits = ovl_xino_bits(dir->d_sb),
-		.xinowarn = ovl_xino_warn(dir->d_sb),
+		.xinobits = ovl_xino_bits(ofs),
+		.xinowarn = ovl_xino_warn(ofs),
 	};
 
 	if (rdt.xinobits && lower_layer)
@@ -735,6 +737,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 {
 	struct ovl_dir_file *od = file->private_data;
 	struct dentry *dentry = file->f_path.dentry;
+	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
 	struct ovl_cache_entry *p;
 	const struct cred *old_cred;
 	int err;
@@ -749,8 +752,8 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 	 * dir is impure then need to adjust d_ino for copied up
	 * entries.
	 */
-	if (ovl_xino_bits(dentry->d_sb) ||
-	    (ovl_same_fs(dentry->d_sb) &&
+	if (ovl_xino_bits(ofs) ||
+	    (ovl_same_fs(ofs) &&
 	     (ovl_is_impure_dir(file) ||
 	      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
 		err = ovl_iterate_real(file, ctx);
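These readdir hunks only change how the xino helpers are called (taking struct ovl_fs instead of dereferencing the super block); the remapping itself still folds a per-layer fsid into the top bits of the 64-bit inode number. A simplified stand-alone sketch of that idea, not the exact ovl_remap_lower_ino() implementation, which also handles warnings and reserved bits:

    #include <stdint.h>

    /*
     * Keep the real inode number in the low bits and a per-layer fsid
     * in the top 'xinobits' bits; fall back to the raw number if it
     * does not fit. Illustrative only.
     */
    static uint64_t remap_lower_ino(uint64_t ino, int xinobits, int fsid)
    {
        int shift = 64 - xinobits;

        if (ino >> shift)
            return ino;    /* too big to remap, return as-is */

        return ino | ((uint64_t)fsid << shift);
    }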
File diff suppressed because it is too large
@@ -83,33 +83,84 @@ bool ovl_verify_lower(struct super_block *sb)
 	return ofs->config.nfs_export && ofs->config.index;
 }
 
+struct ovl_path *ovl_stack_alloc(unsigned int n)
+{
+	return kcalloc(n, sizeof(struct ovl_path), GFP_KERNEL);
+}
+
+void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n)
+{
+	unsigned int i;
+
+	memcpy(dst, src, sizeof(struct ovl_path) * n);
+	for (i = 0; i < n; i++)
+		dget(src[i].dentry);
+}
+
+void ovl_stack_put(struct ovl_path *stack, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; stack && i < n; i++)
+		dput(stack[i].dentry);
+}
+
+void ovl_stack_free(struct ovl_path *stack, unsigned int n)
+{
+	ovl_stack_put(stack, n);
+	kfree(stack);
+}
+
 struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-	size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+	size_t size = offsetof(struct ovl_entry, __lowerstack[numlower]);
 	struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
 
 	if (oe)
-		oe->numlower = numlower;
+		oe->__numlower = numlower;
 
 	return oe;
 }
 
-bool ovl_dentry_remote(struct dentry *dentry)
+void ovl_free_entry(struct ovl_entry *oe)
 {
-	return dentry->d_flags &
-		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
+	ovl_stack_put(ovl_lowerstack(oe), ovl_numlower(oe));
+	kfree(oe);
 }
 
-void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
-			     unsigned int mask)
+#define OVL_D_REVALIDATE (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)
+
+bool ovl_dentry_remote(struct dentry *dentry)
 {
-	struct ovl_entry *oe = OVL_E(dentry);
+	return dentry->d_flags & OVL_D_REVALIDATE;
+}
+
+void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry)
+{
+	if (!ovl_dentry_remote(realdentry))
+		return;
+
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= realdentry->d_flags & OVL_D_REVALIDATE;
+	spin_unlock(&dentry->d_lock);
+}
+
+void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry,
+			   struct ovl_entry *oe)
+{
+	return ovl_dentry_init_flags(dentry, upperdentry, oe, OVL_D_REVALIDATE);
+}
+
+void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
+			   struct ovl_entry *oe, unsigned int mask)
+{
+	struct ovl_path *lowerstack = ovl_lowerstack(oe);
 	unsigned int i, flags = 0;
 
 	if (upperdentry)
 		flags |= upperdentry->d_flags;
-	for (i = 0; i < oe->numlower; i++)
-		flags |= oe->lowerstack[i].dentry->d_flags;
+	for (i = 0; i < ovl_numlower(oe) && lowerstack[i].dentry; i++)
+		flags |= lowerstack[i].dentry->d_flags;
 
 	spin_lock(&dentry->d_lock);
 	dentry->d_flags &= ~mask;
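The new ovl_stack_*() helpers above bundle allocation of an ovl_path array with per-entry dentry reference handling. A minimal usage sketch under that assumption (kernel context, error handling trimmed, the wrapper name is made up for illustration):

    /*
     * Duplicate a lower-path stack: ovl_stack_cpy() takes a dget() on
     * every entry, so the copy must later be released with
     * ovl_stack_free(), which drops the references and frees the array.
     */
    static struct ovl_path *dup_lower_stack(struct ovl_path *src, unsigned int n)
    {
        struct ovl_path *stack = ovl_stack_alloc(n);

        if (!stack)
            return NULL;

        ovl_stack_cpy(stack, src, n);
        return stack;
    }

A caller would pair this with ovl_stack_free(stack, n) once the copy is no longer needed.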
@@ -127,7 +178,7 @@ bool ovl_dentry_weird(struct dentry *dentry)
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
 	enum ovl_path_type type = 0;
 
 	if (ovl_dentry_upper(dentry)) {
@@ -136,7 +187,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
 		/*
 		 * Non-dir dentry can hold lower dentry of its copy up origin.
 		 */
-		if (oe->numlower) {
+		if (ovl_numlower(oe)) {
 			if (ovl_test_flag(OVL_CONST_INO, d_inode(dentry)))
 				type |= __OVL_PATH_ORIGIN;
 			if (d_is_dir(dentry) ||
@@ -144,7 +195,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
 				type |= __OVL_PATH_MERGE;
 		}
 	} else {
-		if (oe->numlower > 1)
+		if (ovl_numlower(oe) > 1)
 			type |= __OVL_PATH_MERGE;
 	}
 	return type;
@@ -160,11 +211,12 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
+	struct ovl_path *lowerpath = ovl_lowerstack(oe);
 
-	if (oe->numlower) {
-		path->mnt = oe->lowerstack[0].layer->mnt;
-		path->dentry = oe->lowerstack[0].dentry;
+	if (ovl_numlower(oe)) {
+		path->mnt = lowerpath->layer->mnt;
+		path->dentry = lowerpath->dentry;
 	} else {
 		*path = (struct path) { };
 	}
@@ -172,11 +224,19 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
 
 void ovl_path_lowerdata(struct dentry *dentry, struct path *path)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
+	struct ovl_path *lowerdata = ovl_lowerdata(oe);
+	struct dentry *lowerdata_dentry = ovl_lowerdata_dentry(oe);
 
-	if (oe->numlower) {
-		path->mnt = oe->lowerstack[oe->numlower - 1].layer->mnt;
-		path->dentry = oe->lowerstack[oe->numlower - 1].dentry;
+	if (lowerdata_dentry) {
+		path->dentry = lowerdata_dentry;
+		/*
+		 * Pairs with smp_wmb() in ovl_dentry_set_lowerdata().
+		 * Make sure that if lowerdata->dentry is visible, then
+		 * datapath->layer is visible as well.
+		 */
+		smp_rmb();
+		path->mnt = READ_ONCE(lowerdata->layer)->mnt;
 	} else {
 		*path = (struct path) { };
 	}
@@ -215,16 +275,16 @@ struct dentry *ovl_dentry_upper(struct dentry *dentry)
 
 struct dentry *ovl_dentry_lower(struct dentry *dentry)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
 
-	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+	return ovl_numlower(oe) ? ovl_lowerstack(oe)->dentry : NULL;
 }
 
 const struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
 
-	return oe->numlower ? oe->lowerstack[0].layer : NULL;
+	return ovl_numlower(oe) ? ovl_lowerstack(oe)->layer : NULL;
 }
 
 /*
@@ -235,9 +295,30 @@ const struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
  */
 struct dentry *ovl_dentry_lowerdata(struct dentry *dentry)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	return ovl_lowerdata_dentry(OVL_E(dentry));
+}
 
-	return oe->numlower ? oe->lowerstack[oe->numlower - 1].dentry : NULL;
+int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath)
+{
+	struct ovl_entry *oe = OVL_E(dentry);
+	struct ovl_path *lowerdata = ovl_lowerdata(oe);
+	struct dentry *datadentry = datapath->dentry;
+
+	if (WARN_ON_ONCE(ovl_numlower(oe) <= 1))
+		return -EIO;
+
+	WRITE_ONCE(lowerdata->layer, datapath->layer);
+	/*
+	 * Pairs with smp_rmb() in ovl_path_lowerdata().
+	 * Make sure that if lowerdata->dentry is visible, then
+	 * lowerdata->layer is visible as well.
+	 */
+	smp_wmb();
+	WRITE_ONCE(lowerdata->dentry, dget(datadentry));
+
+	ovl_dentry_update_reval(dentry, datadentry);
+
+	return 0;
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
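ovl_dentry_set_lowerdata() above publishes the lazily looked-up lowerdata path: the layer pointer is stored first, smp_wmb() orders it, and the dentry store acts as the "published" flag that ovl_path_lowerdata() then tests locklessly. The same publish/consume pattern in isolation, as a generic kernel-context sketch rather than overlayfs code (the struct and function names are made up):

    struct ovl_layer;
    struct dentry;

    struct lazy_path {
        const struct ovl_layer *layer;   /* written before dentry */
        struct dentry *dentry;           /* non-NULL means "published" */
    };

    static void lazy_publish(struct lazy_path *p, const struct ovl_layer *layer,
                             struct dentry *dentry)
    {
        WRITE_ONCE(p->layer, layer);
        smp_wmb();      /* order the layer store before the dentry store */
        WRITE_ONCE(p->dentry, dentry);
    }

    static const struct ovl_layer *lazy_consume(struct lazy_path *p)
    {
        struct dentry *dentry = READ_ONCE(p->dentry);

        if (!dentry)
            return NULL;
        smp_rmb();      /* pairs with smp_wmb() in lazy_publish() */
        return READ_ONCE(p->layer);
    }

A reader that observes a non-NULL dentry is thereby guaranteed to also observe the layer pointer written before it.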
@@ -250,15 +331,19 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode)
 	return ovl_upperdentry_dereference(OVL_I(inode));
 }
 
-void ovl_i_path_real(struct inode *inode, struct path *path)
+struct inode *ovl_i_path_real(struct inode *inode, struct path *path)
 {
+	struct ovl_path *lowerpath = ovl_lowerpath(OVL_I_E(inode));
+
 	path->dentry = ovl_i_dentry_upper(inode);
 	if (!path->dentry) {
-		path->dentry = OVL_I(inode)->lowerpath.dentry;
-		path->mnt = OVL_I(inode)->lowerpath.layer->mnt;
+		path->dentry = lowerpath->dentry;
+		path->mnt = lowerpath->layer->mnt;
 	} else {
 		path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb));
 	}
+
+	return path->dentry ? d_inode_rcu(path->dentry) : NULL;
 }
 
 struct inode *ovl_inode_upper(struct inode *inode)
@@ -270,9 +355,9 @@ struct inode *ovl_inode_upper(struct inode *inode)
 
 struct inode *ovl_inode_lower(struct inode *inode)
 {
-	struct dentry *lowerdentry = OVL_I(inode)->lowerpath.dentry;
+	struct ovl_path *lowerpath = ovl_lowerpath(OVL_I_E(inode));
 
-	return lowerdentry ? d_inode(lowerdentry) : NULL;
+	return lowerpath ? d_inode(lowerpath->dentry) : NULL;
 }
 
 struct inode *ovl_inode_real(struct inode *inode)
@@ -283,10 +368,12 @@ struct inode *ovl_inode_real(struct inode *inode)
 /* Return inode which contains lower data. Do not return metacopy */
 struct inode *ovl_inode_lowerdata(struct inode *inode)
 {
+	struct dentry *lowerdata = ovl_lowerdata_dentry(OVL_I_E(inode));
+
 	if (WARN_ON(!S_ISREG(inode->i_mode)))
 		return NULL;
 
-	return OVL_I(inode)->lowerdata ?: ovl_inode_lower(inode);
+	return lowerdata ? d_inode(lowerdata) : NULL;
 }
 
 /* Return real inode which contains data. Does not return metacopy inode */
@@ -301,9 +388,15 @@ struct inode *ovl_inode_realdata(struct inode *inode)
 	return ovl_inode_lowerdata(inode);
 }
 
+const char *ovl_lowerdata_redirect(struct inode *inode)
+{
+	return inode && S_ISREG(inode->i_mode) ?
+		OVL_I(inode)->lowerdata_redirect : NULL;
+}
+
 struct ovl_dir_cache *ovl_dir_cache(struct inode *inode)
 {
-	return OVL_I(inode)->cache;
+	return inode && S_ISDIR(inode->i_mode) ? OVL_I(inode)->cache : NULL;
 }
 
 void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache)
@@ -313,17 +406,17 @@ void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache)
 
 void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry)
 {
-	set_bit(flag, &OVL_E(dentry)->flags);
+	set_bit(flag, OVL_E_FLAGS(dentry));
 }
 
 void ovl_dentry_clear_flag(unsigned long flag, struct dentry *dentry)
 {
-	clear_bit(flag, &OVL_E(dentry)->flags);
+	clear_bit(flag, OVL_E_FLAGS(dentry));
 }
 
 bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry)
 {
-	return test_bit(flag, &OVL_E(dentry)->flags);
+	return test_bit(flag, OVL_E_FLAGS(dentry));
 }
 
 bool ovl_dentry_is_opaque(struct dentry *dentry)
@@ -413,13 +506,6 @@ bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags)
 	return !ovl_has_upperdata(d_inode(dentry));
 }
 
-bool ovl_redirect_dir(struct super_block *sb)
-{
-	struct ovl_fs *ofs = sb->s_fs_info;
-
-	return ofs->config.redirect_dir && !ofs->noxattr;
-}
-
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
 {
 	return OVL_I(d_inode(dentry))->redirect;
@@ -999,7 +1085,7 @@ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, const struct path *path)
 
 bool ovl_is_metacopy_dentry(struct dentry *dentry)
 {
-	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *oe = OVL_E(dentry);
 
 	if (!d_is_reg(dentry))
 		return false;
@@ -1010,7 +1096,7 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
 		return false;
 	}
 
-	return (oe->numlower > 1);
+	return (ovl_numlower(oe) > 1);
 }
 
 char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding)
@@ -1105,8 +1191,7 @@ void ovl_copyattr(struct inode *inode)
 	vfsuid_t vfsuid;
 	vfsgid_t vfsgid;
 
-	ovl_i_path_real(inode, &realpath);
-	realinode = d_inode(realpath.dentry);
+	realinode = ovl_i_path_real(inode, &realpath);
 	real_idmap = mnt_idmap(realpath.mnt);
 
 	vfsuid = i_uid_into_vfsuid(real_idmap, realinode);