vfs-6.11.misc

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZpEF0AAKCRCRxhvAZXjc
 oq0TAQDjfTLN75RwKQ34RIFtRun2q+OMfBQtSegtaccqazghyAD/QfmPuZDxB5DL
 rsI/5k5O4VupIKrEdIaqvNxmkmDsSAc=
 =bf7E
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.11.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull misc vfs updates from Christian Brauner:
 "Features:

   - Support passing NULL along with AT_EMPTY_PATH for statx().

     NULL paths with any flag value other than AT_EMPTY_PATH go the
     usual route and end up with -EFAULT to retain compatibility (Rust
     is abusing calls of the sort to detect availability of statx)

     This avoids path lookup code, lockref management, memory allocation
     and, in the case of a NULL path, userspace memory access (which can
     be quite expensive with SMAP on x86_64). See the usage sketch after
     this summary.

   - Don't block i_writecount during exec. Remove the
     deny_write_access() mechanism for executables

   - Relax open_by_handle_at() permissions in specific cases where we
     can prove that the caller had sufficient privileges to open a file

   - Switch timespec64 fields in struct inode to discrete integers,
     freeing up 4 bytes

  Fixes:

   - Fix false positive circular locking warning in hfsplus

   - Initialize hfs_inode_info after hfs_alloc_inode() in hfs

   - Avoid accidental overflows in vfs_fallocate()

   - Don't interrupt fallocate with EINTR in tmpfs to avoid constantly
     restarting shmem_fallocate()

   - Add missing quote in comment in fs/readdir

  Cleanups:

   - Don't assign and test in an if statement in mqueue. Move the
     assignment out of the if statement

   - Reflow the logic in may_create_in_sticky()

   - Remove the usage of the deprecated ida_simple_xx() API from procfs

   - Reject FSCONFIG_CMD_CREATE_EXCL requests that depend on the new
     mount api early

   - Rename variables in copy_tree() to make it easier to understand

   - Replace WARN(down_read_trylock, ...) abuse with proper asserts in
     various places in the VFS

   - Get rid of user_path_at_empty() and drop the empty argument from
     getname_flags()

   - Check for a copy error and an empty path in a single branch in
     getname_flags()

   - Avoid redundant smp_mb() for THP handling in do_dentry_open()

   - Rename parent_ino to d_parent_ino and make it use RCU

   - Remove unused header include in fs/readdir

   - Export the in_group_or_capable() helper and switch f2fs and fuse
     over to it instead of open-coding the logic in both places"
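
For illustration, a minimal userspace sketch of the NULL-path statx() fast
path described under Features above; it assumes a glibc that ships the
statx() wrapper (2.28 or later), and the file opened is only a placeholder.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct statx stx;
        int fd = open("/etc/hostname", O_RDONLY);       /* any open fd will do */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Kernels with this change accept a NULL path together with
         * AT_EMPTY_PATH; older kernels return EFAULT, so fall back to
         * the traditional empty string. */
        if (statx(fd, NULL, AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) != 0 &&
            statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) != 0) {
                perror("statx");
                close(fd);
                return 1;
        }

        printf("size: %llu bytes\n", (unsigned long long)stx.stx_size);
        close(fd);
        return 0;
}

On older kernels only the "" fallback succeeds; with this series the NULL
call is served without copying a pathname from userspace at all.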

* tag 'vfs-6.11.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (27 commits)
  ipc: mqueue: remove assignment from IS_ERR argument
  vfs: rename parent_ino to d_parent_ino and make it use RCU
  vfs: support statx(..., NULL, AT_EMPTY_PATH, ...)
  stat: use vfs_empty_path() helper
  fs: new helper vfs_empty_path()
  fs: reflow may_create_in_sticky()
  vfs: remove redundant smp_mb for thp handling in do_dentry_open
  fuse: Use in_group_or_capable() helper
  f2fs: Use in_group_or_capable() helper
  fs: Export in_group_or_capable()
  vfs: reorder checks in may_create_in_sticky
  hfs: fix to initialize fields of hfs_inode_info after hfs_alloc_inode()
  proc: Remove usage of the deprecated ida_simple_xx() API
  hfsplus: fix to avoid false alarm of circular locking
  Improve readability of copy_tree
  vfs: shave a branch in getname_flags
  vfs: retire user_path_at_empty and drop empty arg from getname_flags
  vfs: stop using user_path_at_empty in do_readlinkat
  tmpfs: don't interrupt fallocate with EINTR
  fs: don't block i_writecount during exec
  ...
Linus Torvalds 2024-07-15 10:52:51 -07:00
commit b051320d6a
38 changed files with 545 additions and 335 deletions

View File

@ -17,8 +17,6 @@
#include <linux/filelock.h>
#include <linux/security.h>
#include "internal.h"
/**
* setattr_should_drop_sgid - determine whether the setgid bit needs to be
* removed

View File

@ -1216,7 +1216,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
}
reloc_func_desc = interp_load_addr;
allow_write_access(interpreter);
fput(interpreter);
kfree(interp_elf_ex);
@ -1308,7 +1307,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
kfree(interp_elf_ex);
kfree(interp_elf_phdata);
out_free_file:
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_ph:

View File

@ -394,7 +394,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
}
allow_write_access(interpreter);
fput(interpreter);
interpreter = NULL;
}
@ -466,10 +465,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
retval = 0;
error:
if (interpreter) {
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
}
kfree(interpreter_name);
kfree(exec_params.phdrs);
kfree(exec_params.loadmap);

View File

@ -247,13 +247,10 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (retval < 0)
goto ret;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
if (fmt->flags & MISC_FMT_OPEN_FILE)
interp_file = file_clone_open(fmt->interp_file);
if (!IS_ERR(interp_file))
deny_write_access(interp_file);
} else {
else
interp_file = open_exec(fmt->interpreter);
}
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
goto ret;

View File

@ -1555,7 +1555,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
{
struct dentry *dentry;
WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
rwsem_assert_held_write(&sb->s_umount);
dentry = sb->s_root;
sb->s_root = NULL;
@ -3106,6 +3106,34 @@ void d_tmpfile(struct file *file, struct inode *inode)
}
EXPORT_SYMBOL(d_tmpfile);
/*
* Obtain inode number of the parent dentry.
*/
ino_t d_parent_ino(struct dentry *dentry)
{
struct dentry *parent;
struct inode *iparent;
unsigned seq;
ino_t ret;
scoped_guard(rcu) {
seq = raw_seqcount_begin(&dentry->d_seq);
parent = READ_ONCE(dentry->d_parent);
iparent = d_inode_rcu(parent);
if (likely(iparent)) {
ret = iparent->i_ino;
if (!read_seqcount_retry(&dentry->d_seq, seq))
return ret;
}
}
spin_lock(&dentry->d_lock);
ret = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lock);
return ret;
}
EXPORT_SYMBOL(d_parent_ino);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
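
As a side note, a standalone userspace sketch of the lockless pattern the new
d_parent_ino() above relies on: read optimistically under a sequence counter
and fall back to a lock if a writer may have raced. C11 atomics and a pthread
mutex stand in for the kernel's seqcount, RCU and d_lock; the writer side is
omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic unsigned seq;                    /* even: stable, odd: writer active */
static _Atomic uint64_t parent_ino_value = 42;  /* the data being published */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t read_parent_ino(void)
{
        unsigned s = atomic_load_explicit(&seq, memory_order_acquire);
        uint64_t v;

        if (!(s & 1)) {                         /* no writer in progress: try lockless */
                v = atomic_load_explicit(&parent_ino_value, memory_order_relaxed);
                atomic_thread_fence(memory_order_acquire);
                if (atomic_load_explicit(&seq, memory_order_relaxed) == s)
                        return v;               /* counter unchanged: read was stable */
        }

        /* Slow path, analogous to d_parent_ino() taking d_lock. */
        pthread_mutex_lock(&lock);
        v = atomic_load_explicit(&parent_ino_value, memory_order_relaxed);
        pthread_mutex_unlock(&lock);
        return v;
}

int main(void)
{
        printf("parent ino: %llu\n", (unsigned long long)read_parent_ino());
        return 0;
}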

View File

@ -952,10 +952,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
path_noexec(&file->f_path)))
goto exit;
err = deny_write_access(file);
if (err)
goto exit;
out:
return file;
@ -971,8 +967,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
*
* Returns ERR_PTR on failure or allocated struct file on success.
*
* As this is a wrapper for the internal do_open_execat(), callers
* must call allow_write_access() before fput() on release. Also see
* As this is a wrapper for the internal do_open_execat(). Also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
@ -1524,10 +1519,8 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
/* Matches do_open_execat() */
static void do_close_execat(struct file *file)
{
if (!file)
return;
allow_write_access(file);
fput(file);
if (file)
fput(file);
}
static void free_bprm(struct linux_binprm *bprm)
@ -1846,7 +1839,6 @@ static int exec_binprm(struct linux_binprm *bprm)
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);

View File

@ -427,7 +427,7 @@ EXPORT_SYMBOL_GPL(exportfs_encode_fh);
struct dentry *
exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
int fileid_type,
int fileid_type, unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context)
{
@ -445,6 +445,11 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
if (IS_ERR_OR_NULL(result))
return result;
if ((flags & EXPORT_FH_DIR_ONLY) && !d_is_dir(result)) {
err = -ENOTDIR;
goto err_result;
}
/*
* If no acceptance criteria was specified by caller, a disconnected
* dentry is also accepatable. Callers may use this mode to query if
@ -581,7 +586,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
{
struct dentry *ret;
ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type,
ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type, 0,
acceptable, context);
if (IS_ERR_OR_NULL(ret)) {
if (ret == ERR_PTR(-ENOMEM))

View File

@ -219,8 +219,7 @@ static int f2fs_acl_update_mode(struct mnt_idmap *idmap,
return error;
if (error == 0)
*acl = NULL;
if (!vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)) &&
!capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
*mode_p = mode;
return 0;

View File

@ -185,7 +185,7 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
if (!dentry)
return 0;
*pino = parent_ino(dentry);
*pino = d_parent_ino(dentry);
dput(dentry);
return 1;
}
@ -923,10 +923,8 @@ static void __setattr_copy(struct mnt_idmap *idmap,
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
if (!vfsgid_in_group_p(vfsgid) &&
!capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
set_acl_inode(inode, mode);
}

View File

@ -115,88 +115,188 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
return err;
}
static struct vfsmount *get_vfsmount_from_fd(int fd)
static int get_path_from_fd(int fd, struct path *root)
{
struct vfsmount *mnt;
if (fd == AT_FDCWD) {
struct fs_struct *fs = current->fs;
spin_lock(&fs->lock);
mnt = mntget(fs->pwd.mnt);
*root = fs->pwd;
path_get(root);
spin_unlock(&fs->lock);
} else {
struct fd f = fdget(fd);
if (!f.file)
return ERR_PTR(-EBADF);
mnt = mntget(f.file->f_path.mnt);
return -EBADF;
*root = f.file->f_path;
path_get(root);
fdput(f);
}
return mnt;
return 0;
}
enum handle_to_path_flags {
HANDLE_CHECK_PERMS = (1 << 0),
HANDLE_CHECK_SUBTREE = (1 << 1),
};
struct handle_to_path_ctx {
struct path root;
enum handle_to_path_flags flags;
unsigned int fh_flags;
};
static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
{
return 1;
}
static int do_handle_to_path(int mountdirfd, struct file_handle *handle,
struct path *path)
{
struct handle_to_path_ctx *ctx = context;
struct user_namespace *user_ns = current_user_ns();
struct dentry *d, *root = ctx->root.dentry;
struct mnt_idmap *idmap = mnt_idmap(ctx->root.mnt);
int retval = 0;
int handle_dwords;
path->mnt = get_vfsmount_from_fd(mountdirfd);
if (IS_ERR(path->mnt)) {
retval = PTR_ERR(path->mnt);
goto out_err;
if (!root)
return 1;
/* Old permission model with global CAP_DAC_READ_SEARCH. */
if (!ctx->flags)
return 1;
/*
* It's racy as we're not taking rename_lock but we're able to ignore
* permissions and we just need an approximation whether we were able
* to follow a path to the file.
*
* It's also potentially expensive on some filesystems especially if
* there is a deep path.
*/
d = dget(dentry);
while (d != root && !IS_ROOT(d)) {
struct dentry *parent = dget_parent(d);
/*
* We know that we have the ability to override DAC permissions
* as we've verified this earlier via CAP_DAC_READ_SEARCH. But
* we also need to make sure that there aren't any unmapped
* inodes in the path that would prevent us from reaching the
* file.
*/
if (!privileged_wrt_inode_uidgid(user_ns, idmap,
d_inode(parent))) {
dput(d);
dput(parent);
return retval;
}
dput(d);
d = parent;
}
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
path->dentry = exportfs_decode_fh(path->mnt,
(struct fid *)handle->f_handle,
handle_dwords, handle->handle_type,
vfs_dentry_acceptable, NULL);
if (IS_ERR(path->dentry)) {
retval = PTR_ERR(path->dentry);
goto out_mnt;
}
return 0;
out_mnt:
mntput(path->mnt);
out_err:
if (!(ctx->flags & HANDLE_CHECK_SUBTREE) || d == root)
retval = 1;
WARN_ON_ONCE(d != root && d != root->d_sb->s_root);
dput(d);
return retval;
}
static int do_handle_to_path(struct file_handle *handle, struct path *path,
struct handle_to_path_ctx *ctx)
{
int handle_dwords;
struct vfsmount *mnt = ctx->root.mnt;
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
path->dentry = exportfs_decode_fh_raw(mnt,
(struct fid *)handle->f_handle,
handle_dwords, handle->handle_type,
ctx->fh_flags,
vfs_dentry_acceptable, ctx);
if (IS_ERR_OR_NULL(path->dentry)) {
if (path->dentry == ERR_PTR(-ENOMEM))
return -ENOMEM;
return -ESTALE;
}
path->mnt = mntget(mnt);
return 0;
}
/*
* Allow relaxed permissions of file handles if the caller has the
* ability to mount the filesystem or create a bind-mount of the
* provided @mountdirfd.
*
* In both cases the caller may be able to get an unobstructed way to
* the encoded file handle. If the caller is only able to create a
* bind-mount we need to verify that there are no locked mounts on top
* of it that could prevent us from getting to the encoded file.
*
* In principle, locked mounts can prevent the caller from mounting the
* filesystem but that only applies to procfs and sysfs neither of which
* support decoding file handles.
*/
static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
unsigned int o_flags)
{
struct path *root = &ctx->root;
/*
* Restrict to O_DIRECTORY to provide a deterministic API that avoids a
* confusing api in the face of disconnected non-dir dentries.
*
* There's only one dentry for each directory inode (VFS rule)...
*/
if (!(o_flags & O_DIRECTORY))
return false;
if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
ctx->flags = HANDLE_CHECK_PERMS;
else if (is_mounted(root->mnt) &&
ns_capable(real_mount(root->mnt)->mnt_ns->user_ns,
CAP_SYS_ADMIN) &&
!has_locked_children(real_mount(root->mnt), root->dentry))
ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE;
else
return false;
/* Are we able to override DAC permissions? */
if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH))
return false;
ctx->fh_flags = EXPORT_FH_DIR_ONLY;
return true;
}
static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
struct path *path)
struct path *path, unsigned int o_flags)
{
int retval = 0;
struct file_handle f_handle;
struct file_handle *handle = NULL;
struct handle_to_path_ctx ctx = {};
/*
* With handle we don't look at the execute bit on the
* directory. Ideally we would like CAP_DAC_SEARCH.
* But we don't have that
*/
if (!capable(CAP_DAC_READ_SEARCH)) {
retval = -EPERM;
retval = get_path_from_fd(mountdirfd, &ctx.root);
if (retval)
goto out_err;
if (!capable(CAP_DAC_READ_SEARCH) && !may_decode_fh(&ctx, o_flags)) {
retval = -EPERM;
goto out_path;
}
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
retval = -EFAULT;
goto out_err;
goto out_path;
}
if ((f_handle.handle_bytes > MAX_HANDLE_SZ) ||
(f_handle.handle_bytes == 0)) {
retval = -EINVAL;
goto out_err;
goto out_path;
}
handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle) {
retval = -ENOMEM;
goto out_err;
goto out_path;
}
/* copy the full handle */
*handle = f_handle;
@ -207,10 +307,12 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
goto out_handle;
}
retval = do_handle_to_path(mountdirfd, handle, path);
retval = do_handle_to_path(handle, path, &ctx);
out_handle:
kfree(handle);
out_path:
path_put(&ctx.root);
out_err:
return retval;
}
@ -223,7 +325,7 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
struct file *file;
int fd;
retval = handle_to_path(mountdirfd, ufh, &path);
retval = handle_to_path(mountdirfd, ufh, &path, open_flag);
if (retval)
return retval;
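
For orientation, a hedged userspace sketch of the relaxed open_by_handle_at()
flow enabled above: a caller that is privileged over the mount (or could
bind-mount it) and has CAP_DAC_READ_SEARCH in its own user namespace can
decode a directory handle by passing O_DIRECTORY, without needing global
CAP_DAC_READ_SEARCH. The /tmp paths are placeholders; on older kernels or
without those privileges the final call fails with EPERM or ESTALE.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        struct file_handle *fh;
        int mount_id, mount_fd, fd;

        fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
        if (!fh)
                return 1;
        fh->handle_bytes = MAX_HANDLE_SZ;

        /* Encode a handle for a directory we can reach normally. */
        if (name_to_handle_at(AT_FDCWD, "/tmp", fh, &mount_id, 0) != 0) {
                perror("name_to_handle_at");
                return 1;
        }

        /* An fd somewhere on the same mount, used as the "mount dir" fd. */
        mount_fd = open("/tmp", O_RDONLY | O_DIRECTORY);
        if (mount_fd < 0)
                return 1;

        /* O_DIRECTORY is what opts into the relaxed permission checks. */
        fd = open_by_handle_at(mount_fd, fh, O_RDONLY | O_DIRECTORY);
        if (fd < 0)
                perror("open_by_handle_at");
        else
                close(fd);

        close(mount_fd);
        free(fh);
        return 0;
}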

View File

@ -220,10 +220,6 @@ static int vfs_cmd_create(struct fs_context *fc, bool exclusive)
if (!mount_capable(fc))
return -EPERM;
/* require the new mount api */
if (exclusive && fc->ops == &legacy_fs_context_ops)
return -EOPNOTSUPP;
fc->phase = FS_CONTEXT_CREATING;
fc->exclusive = exclusive;
@ -411,6 +407,7 @@ SYSCALL_DEFINE5(fsconfig,
case FSCONFIG_SET_PATH:
case FSCONFIG_SET_PATH_EMPTY:
case FSCONFIG_SET_FD:
case FSCONFIG_CMD_CREATE_EXCL:
ret = -EOPNOTSUPP;
goto out_f;
}
@ -451,7 +448,7 @@ SYSCALL_DEFINE5(fsconfig,
fallthrough;
case FSCONFIG_SET_PATH:
param.type = fs_value_is_filename;
param.name = getname_flags(_value, lookup_flags, NULL);
param.name = getname_flags(_value, lookup_flags);
if (IS_ERR(param.name)) {
ret = PTR_ERR(param.name);
goto out_key;

View File

@ -146,8 +146,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
* be stripped.
*/
if (fc->posix_acl &&
!vfsgid_in_group_p(i_gid_into_vfsgid(&nop_mnt_idmap, inode)) &&
!capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID))
!in_group_or_capable(&nop_mnt_idmap, inode,
i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);

View File

@ -200,6 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
if (S_ISDIR(mode)) {
inode->i_size = 2;
HFS_SB(sb)->folder_count++;
@ -275,6 +276,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
for (count = 0, i = 0; i < 3; i++)
count += be16_to_cpu(ext[i].count);
HFS_I(inode)->first_blocks = count;
HFS_I(inode)->cached_start = 0;
HFS_I(inode)->cached_blocks = 0;
inode->i_size = HFS_I(inode)->phys_size = log_size;
HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

View File

@ -25,19 +25,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
switch (tree->cnid) {
case HFSPLUS_CAT_CNID:
mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
break;
case HFSPLUS_EXT_CNID:
mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
break;
case HFSPLUS_ATTR_CNID:
mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
break;
default:
BUG();
}
mutex_lock_nested(&tree->tree_lock,
hfsplus_btree_lock_class(tree));
return 0;
}

View File

@ -430,7 +430,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
hfsplus_free_extents(sb, ext_entry, total_blocks - start,
total_blocks);
total_blocks = start;
mutex_lock(&fd.tree->tree_lock);
mutex_lock_nested(&fd.tree->tree_lock,
hfsplus_btree_lock_class(fd.tree));
} while (total_blocks > blocks);
hfs_find_exit(&fd);
@ -592,7 +593,8 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks = blk_cnt;
mutex_lock(&fd.tree->tree_lock);
mutex_lock_nested(&fd.tree->tree_lock,
hfsplus_btree_lock_class(fd.tree));
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
@ -606,7 +608,8 @@ void hfsplus_file_truncate(struct inode *inode)
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
mutex_lock(&fd.tree->tree_lock);
mutex_lock_nested(&fd.tree->tree_lock,
hfsplus_btree_lock_class(fd.tree));
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;

View File

@ -553,6 +553,27 @@ static inline __be32 __hfsp_ut2mt(time64_t ut)
return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
}
static inline enum hfsplus_btree_mutex_classes
hfsplus_btree_lock_class(struct hfs_btree *tree)
{
enum hfsplus_btree_mutex_classes class;
switch (tree->cnid) {
case HFSPLUS_CAT_CNID:
class = CATALOG_BTREE_MUTEX;
break;
case HFSPLUS_EXT_CNID:
class = EXTENTS_BTREE_MUTEX;
break;
case HFSPLUS_ATTR_CNID:
class = ATTR_BTREE_MUTEX;
break;
default:
BUG();
}
return class;
}
/* compatibility */
#define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)

View File

@ -40,7 +40,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Directory containing the bootable system */
vh->finder_info[0] = bvh->finder_info[0] =
cpu_to_be32(parent_ino(dentry));
cpu_to_be32(d_parent_ino(dentry));
/*
* Bootloader. Just using the inode here breaks in the case of
@ -51,7 +51,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Per spec, the OS X system folder - same as finder_info[0] here */
vh->finder_info[5] = bvh->finder_info[5] =
cpu_to_be32(parent_ino(dentry));
cpu_to_be32(d_parent_ino(dentry));
mutex_unlock(&sbi->vh_mutex);
return 0;

View File

@ -2538,6 +2538,7 @@ bool in_group_or_capable(struct mnt_idmap *idmap,
return true;
return false;
}
EXPORT_SYMBOL(in_group_or_capable);
/**
* mode_strip_sgid - handle the sgid bit for non-directories

View File

@ -247,6 +247,8 @@ extern const struct dentry_operations ns_dentry_operations;
int getname_statx_lookup_flags(int flags);
int do_statx(int dfd, struct filename *filename, unsigned int flags,
unsigned int mask, struct statx __user *buffer);
int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
struct statx __user *buffer);
/*
* fs/splice.c:
@ -321,3 +323,15 @@ struct stashed_operations {
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path);
void stashed_dentry_prune(struct dentry *dentry);
/**
* path_mounted - check whether path is mounted
* @path: path to check
*
* Determine whether @path refers to the root of a mount.
*
* Return: true if @path is the root of a mount, false if not.
*/
static inline bool path_mounted(const struct path *path)
{
return path->mnt->mnt_root == path->dentry;
}

View File

@ -152,3 +152,4 @@ static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
}
extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
bool has_locked_children(struct mount *mnt, struct dentry *dentry);

View File

@ -126,7 +126,7 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
getname_flags(const char __user *filename, int flags)
{
struct filename *result;
char *kname;
@ -148,9 +148,20 @@ getname_flags(const char __user *filename, int flags, int *empty)
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
if (unlikely(len < 0)) {
__putname(result);
return ERR_PTR(len);
/*
* Handle both empty path and copy failure in one go.
*/
if (unlikely(len <= 0)) {
if (unlikely(len < 0)) {
__putname(result);
return ERR_PTR(len);
}
/* The empty path is special. */
if (!(flags & LOOKUP_EMPTY)) {
__putname(result);
return ERR_PTR(-ENOENT);
}
}
/*
@ -180,6 +191,12 @@ getname_flags(const char __user *filename, int flags, int *empty)
kfree(result);
return ERR_PTR(len);
}
/* The empty path is special. */
if (unlikely(!len) && !(flags & LOOKUP_EMPTY)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENOENT);
}
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
@ -188,16 +205,6 @@ getname_flags(const char __user *filename, int flags, int *empty)
}
atomic_set(&result->refcnt, 1);
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
*empty = 1;
if (!(flags & LOOKUP_EMPTY)) {
putname(result);
return ERR_PTR(-ENOENT);
}
}
result->uptr = filename;
result->aname = NULL;
audit_getname(result);
@ -209,13 +216,13 @@ getname_uflags(const char __user *filename, int uflags)
{
int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
return getname_flags(filename, flags, NULL);
return getname_flags(filename, flags);
}
struct filename *
getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
return getname_flags(filename, 0);
}
struct filename *
@ -1233,29 +1240,48 @@ int may_linkat(struct mnt_idmap *idmap, const struct path *link)
*
* Returns 0 if the open is allowed, -ve on error.
*/
static int may_create_in_sticky(struct mnt_idmap *idmap,
struct nameidata *nd, struct inode *const inode)
static int may_create_in_sticky(struct mnt_idmap *idmap, struct nameidata *nd,
struct inode *const inode)
{
umode_t dir_mode = nd->dir_mode;
vfsuid_t dir_vfsuid = nd->dir_vfsuid;
vfsuid_t dir_vfsuid = nd->dir_vfsuid, i_vfsuid;
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
likely(!(dir_mode & S_ISVTX)) ||
vfsuid_eq(i_uid_into_vfsuid(idmap, inode), dir_vfsuid) ||
vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid()))
if (likely(!(dir_mode & S_ISVTX)))
return 0;
if (likely(dir_mode & 0002) ||
(dir_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
const char *operation = S_ISFIFO(inode->i_mode) ?
"sticky_create_fifo" :
"sticky_create_regular";
audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
if (S_ISREG(inode->i_mode) && !sysctl_protected_regular)
return 0;
if (S_ISFIFO(inode->i_mode) && !sysctl_protected_fifos)
return 0;
i_vfsuid = i_uid_into_vfsuid(idmap, inode);
if (vfsuid_eq(i_vfsuid, dir_vfsuid))
return 0;
if (vfsuid_eq_kuid(i_vfsuid, current_fsuid()))
return 0;
if (likely(dir_mode & 0002)) {
audit_log_path_denied(AUDIT_ANOM_CREAT, "sticky_create");
return -EACCES;
}
if (dir_mode & 0020) {
if (sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) {
audit_log_path_denied(AUDIT_ANOM_CREAT,
"sticky_create_fifo");
return -EACCES;
}
if (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode)) {
audit_log_path_denied(AUDIT_ANOM_CREAT,
"sticky_create_regular");
return -EACCES;
}
}
return 0;
}
@ -2969,16 +2995,16 @@ int path_pts(struct path *path)
}
#endif
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
int user_path_at(int dfd, const char __user *name, unsigned flags,
struct path *path)
{
struct filename *filename = getname_flags(name, flags, empty);
struct filename *filename = getname_flags(name, flags);
int ret = filename_lookup(dfd, filename, flags, path, NULL);
putname(filename);
return ret;
}
EXPORT_SYMBOL(user_path_at_empty);
EXPORT_SYMBOL(user_path_at);
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode)

View File

@ -1846,19 +1846,6 @@ bool may_mount(void)
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
/**
* path_mounted - check whether path is mounted
* @path: path to check
*
* Determine whether @path refers to the root of a mount.
*
* Return: true if @path is the root of a mount, false if not.
*/
static inline bool path_mounted(const struct path *path)
{
return path->mnt->mnt_root == path->dentry;
}
static void warn_mandlock(void)
{
pr_warn_once("=======================================================\n"
@ -1966,69 +1953,72 @@ static bool mnt_ns_loop(struct dentry *dentry)
return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
int flag)
{
struct mount *res, *p, *q, *r, *parent;
struct mount *res, *src_parent, *src_root_child, *src_mnt,
*dst_parent, *dst_mnt;
if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
return ERR_PTR(-EINVAL);
if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
return ERR_PTR(-EINVAL);
res = q = clone_mnt(mnt, dentry, flag);
if (IS_ERR(q))
return q;
res = dst_mnt = clone_mnt(src_root, dentry, flag);
if (IS_ERR(dst_mnt))
return dst_mnt;
q->mnt_mountpoint = mnt->mnt_mountpoint;
src_parent = src_root;
dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;
p = mnt;
list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
struct mount *s;
if (!is_subdir(r->mnt_mountpoint, dentry))
list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
continue;
for (s = r; s; s = next_mnt(s, r)) {
for (src_mnt = src_root_child; src_mnt;
src_mnt = next_mnt(src_mnt, src_root_child)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
if (s->mnt.mnt_flags & MNT_LOCKED) {
IS_MNT_UNBINDABLE(src_mnt)) {
if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
/* Both unbindable and locked. */
q = ERR_PTR(-EPERM);
dst_mnt = ERR_PTR(-EPERM);
goto out;
} else {
s = skip_mnt_tree(s);
src_mnt = skip_mnt_tree(src_mnt);
continue;
}
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
s = skip_mnt_tree(s);
is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
src_mnt = skip_mnt_tree(src_mnt);
continue;
}
while (p != s->mnt_parent) {
p = p->mnt_parent;
q = q->mnt_parent;
while (src_parent != src_mnt->mnt_parent) {
src_parent = src_parent->mnt_parent;
dst_mnt = dst_mnt->mnt_parent;
}
p = s;
parent = q;
q = clone_mnt(p, p->mnt.mnt_root, flag);
if (IS_ERR(q))
src_parent = src_mnt;
dst_parent = dst_mnt;
dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
if (IS_ERR(dst_mnt))
goto out;
lock_mount_hash();
list_add_tail(&q->mnt_list, &res->mnt_list);
attach_mnt(q, parent, p->mnt_mp, false);
list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
unlock_mount_hash();
}
}
return res;
out:
if (res) {
lock_mount_hash();
umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
return q;
return dst_mnt;
}
/* Caller should check returned pointer for errors */
@ -2078,7 +2068,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
namespace_unlock();
}
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;

View File

@ -247,7 +247,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
dentry = dget(exp->ex_path.dentry);
else {
dentry = exportfs_decode_fh_raw(exp->ex_path.mnt, fid,
data_left, fileid_type,
data_left, fileid_type, 0,
nfsd_acceptable, exp);
if (IS_ERR_OR_NULL(dentry)) {
trace_nfsd_set_fh_dentry_badhandle(rqstp, fhp,

View File

@ -247,6 +247,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
long ret;
loff_t sum;
if (offset < 0 || len <= 0)
return -EINVAL;
@ -319,8 +320,11 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
return -ENODEV;
/* Check for wrap through zero too */
if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
/* Check for wraparound */
if (check_add_overflow(offset, len, &sum))
return -EFBIG;
if (sum > inode->i_sb->s_maxbytes)
return -EFBIG;
if (!file->f_op->fallocate)
@ -982,12 +986,11 @@ static int do_dentry_open(struct file *f,
*/
if (f->f_mode & FMODE_WRITE) {
/*
* Paired with smp_mb() in collapse_file() to ensure nr_thps
* is up to date and the update to i_writecount by
* get_write_access() is visible. Ensures subsequent insertion
* of THPs into the page cache will fail.
* Depends on full fence from get_write_access() to synchronize
* against collapse_file() regarding i_writecount and nr_thps
* updates. Ensures subsequent insertion of THPs into the page
* cache will fail.
*/
smp_mb();
if (filemap_nr_thps(inode->i_mapping)) {
struct address_space *mapping = inode->i_mapping;
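
A standalone illustration of the wraparound check added to vfs_fallocate()
above. __builtin_add_overflow() is the GCC/Clang builtin that the kernel's
check_add_overflow() wraps; the limit used below is an arbitrary stand-in
for s_maxbytes.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for loff_t and a filesystem's s_maxbytes. */
typedef long long loff_t_demo;

static bool fallocate_range_ok(loff_t_demo offset, loff_t_demo len,
                               loff_t_demo s_maxbytes)
{
        loff_t_demo sum;

        if (__builtin_add_overflow(offset, len, &sum))
                return false;           /* offset + len wrapped: reject with EFBIG */
        return sum <= s_maxbytes;       /* otherwise only the fs size limit applies */
}

int main(void)
{
        printf("%d\n", fallocate_range_ok(1LL << 62, 1LL << 62, 1LL << 40)); /* 0 */
        printf("%d\n", fallocate_range_ok(4096, 4096, 1LL << 40));           /* 1 */
        return 0;
}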

View File

@ -202,8 +202,8 @@ int proc_alloc_inum(unsigned int *inum)
{
int i;
i = ida_simple_get(&proc_inum_ida, 0, UINT_MAX - PROC_DYNAMIC_FIRST + 1,
GFP_KERNEL);
i = ida_alloc_max(&proc_inum_ida, UINT_MAX - PROC_DYNAMIC_FIRST,
GFP_KERNEL);
if (i < 0)
return i;
@ -213,7 +213,7 @@ int proc_alloc_inum(unsigned int *inum)
void proc_free_inum(unsigned int inum)
{
ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
}
static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)

View File

@ -2246,9 +2246,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
up_read(&sb->s_umount);
rwsem_assert_held_write(&sb->s_umount);
/* Cannot turn off usage accounting without turning off limits, or
* suspend quotas and simultaneously turn quotas off. */
@ -2510,9 +2508,7 @@ int dquot_resume(struct super_block *sb, int type)
int ret = 0, cnt;
unsigned int flags;
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
up_read(&sb->s_umount);
rwsem_assert_held_write(&sb->s_umount);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)

View File

@ -22,8 +22,6 @@
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
/*
* Some filesystems were never converted to '->iterate_shared()'
* and their directory iterators want the inode lock held for
@ -72,7 +70,7 @@ int wrap_directory_iterator(struct file *file,
EXPORT_SYMBOL(wrap_directory_iterator);
/*
* Note the "unsafe_put_user() semantics: we goto a
* Note the "unsafe_put_user()" semantics: we goto a
* label for errors.
*/
#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \

fs/stat.c
View File

@ -214,6 +214,43 @@ int getname_statx_lookup_flags(int flags)
return lookup_flags;
}
static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
u32 request_mask)
{
int error = vfs_getattr(path, stat, request_mask, flags);
if (request_mask & STATX_MNT_ID_UNIQUE) {
stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
stat->result_mask |= STATX_MNT_ID_UNIQUE;
} else {
stat->mnt_id = real_mount(path->mnt)->mnt_id;
stat->result_mask |= STATX_MNT_ID;
}
if (path_mounted(path))
stat->attributes |= STATX_ATTR_MOUNT_ROOT;
stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
/* Handle STATX_DIOALIGN for block devices. */
if (request_mask & STATX_DIOALIGN) {
struct inode *inode = d_backing_inode(path->dentry);
if (S_ISBLK(inode->i_mode))
bdev_statx_dioalign(inode, stat);
}
return error;
}
static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
u32 request_mask)
{
CLASS(fd_raw, f)(fd);
if (!f.file)
return -EBADF;
return vfs_statx_path(&f.file->f_path, flags, stat, request_mask);
}
/**
* vfs_statx - Get basic and extra attributes by filename
* @dfd: A file descriptor representing the base dir for a relative filename
@ -243,36 +280,13 @@ static int vfs_statx(int dfd, struct filename *filename, int flags,
retry:
error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
if (error)
goto out;
error = vfs_getattr(&path, stat, request_mask, flags);
if (request_mask & STATX_MNT_ID_UNIQUE) {
stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
stat->result_mask |= STATX_MNT_ID_UNIQUE;
} else {
stat->mnt_id = real_mount(path.mnt)->mnt_id;
stat->result_mask |= STATX_MNT_ID;
}
if (path.mnt->mnt_root == path.dentry)
stat->attributes |= STATX_ATTR_MOUNT_ROOT;
stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
/* Handle STATX_DIOALIGN for block devices. */
if (request_mask & STATX_DIOALIGN) {
struct inode *inode = d_backing_inode(path.dentry);
if (S_ISBLK(inode->i_mode))
bdev_statx_dioalign(inode, stat);
}
return error;
error = vfs_statx_path(&path, flags, stat, request_mask);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
@ -289,18 +303,10 @@ int vfs_fstatat(int dfd, const char __user *filename,
* If AT_EMPTY_PATH is set, we expect the common case to be that
* empty path, and avoid doing all the extra pathname work.
*/
if (dfd >= 0 && flags == AT_EMPTY_PATH) {
char c;
if (flags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
return vfs_fstat(dfd, stat);
ret = get_user(c, filename);
if (unlikely(ret))
return ret;
if (likely(!c))
return vfs_fstat(dfd, stat);
}
name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
name = getname_flags(filename, getname_statx_lookup_flags(statx_flags));
ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
putname(name);
@ -488,34 +494,39 @@ static int do_readlinkat(int dfd, const char __user *pathname,
char __user *buf, int bufsiz)
{
struct path path;
struct filename *name;
int error;
int empty = 0;
unsigned int lookup_flags = LOOKUP_EMPTY;
if (bufsiz <= 0)
return -EINVAL;
retry:
error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
if (!error) {
struct inode *inode = d_backing_inode(path.dentry);
name = getname_flags(pathname, lookup_flags);
error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
if (unlikely(error)) {
putname(name);
return error;
}
error = empty ? -ENOENT : -EINVAL;
/*
* AFS mountpoints allow readlink(2) but are not symlinks
*/
if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
error = security_inode_readlink(path.dentry);
if (!error) {
touch_atime(&path);
error = vfs_readlink(path.dentry, buf, bufsiz);
}
}
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
/*
* AFS mountpoints allow readlink(2) but are not symlinks
*/
if (d_is_symlink(path.dentry) ||
d_backing_inode(path.dentry)->i_op->readlink) {
error = security_inode_readlink(path.dentry);
if (!error) {
touch_atime(&path);
error = vfs_readlink(path.dentry, buf, bufsiz);
}
} else {
error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
}
path_put(&path);
putname(name);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
@ -674,7 +685,8 @@ int do_statx(int dfd, struct filename *filename, unsigned int flags,
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
/* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
/*
* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
* from userland.
*/
mask &= ~STATX_CHANGE_COOKIE;
@ -686,16 +698,41 @@ int do_statx(int dfd, struct filename *filename, unsigned int flags,
return cp_statx(&stat, buffer);
}
int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
struct statx __user *buffer)
{
struct kstat stat;
int error;
if (mask & STATX__RESERVED)
return -EINVAL;
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
/*
* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
* from userland.
*/
mask &= ~STATX_CHANGE_COOKIE;
error = vfs_statx_fd(fd, flags, &stat, mask);
if (error)
return error;
return cp_statx(&stat, buffer);
}
/**
* sys_statx - System call to get enhanced stats
* @dfd: Base directory to pathwalk from *or* fd to stat.
* @filename: File to stat or "" with AT_EMPTY_PATH
* @filename: File to stat or either NULL or "" with AT_EMPTY_PATH
* @flags: AT_* flags to control pathwalk.
* @mask: Parts of statx struct actually required.
* @buffer: Result buffer.
*
* Note that fstat() can be emulated by setting dfd to the fd of interest,
* supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
* supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
* in the flags.
*/
SYSCALL_DEFINE5(statx,
int, dfd, const char __user *, filename, unsigned, flags,
@ -703,9 +740,24 @@ SYSCALL_DEFINE5(statx,
struct statx __user *, buffer)
{
int ret;
unsigned lflags;
struct filename *name;
name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
/*
* Short-circuit handling of NULL and "" paths.
*
* For a NULL path we require and accept only the AT_EMPTY_PATH flag
* (possibly |'d with AT_STATX flags).
*
* However, glibc on 32-bit architectures implements fstatat as statx
* with the "" pathname and AT_NO_AUTOMOUNT | AT_EMPTY_PATH flags.
* Supporting this results in the uglification below.
*/
lflags = flags & ~(AT_NO_AUTOMOUNT | AT_STATX_SYNC_TYPE);
if (lflags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);
name = getname_flags(filename, getname_statx_lookup_flags(flags));
ret = do_statx(dfd, name, flags, mask, buffer);
putname(name);

View File

@ -278,6 +278,8 @@ static inline unsigned d_count(const struct dentry *dentry)
return dentry->d_lockref.count;
}
ino_t d_parent_ino(struct dentry *dentry);
/*
* helper function for dentry_operations.d_dname() members
*/

View File

@ -158,6 +158,7 @@ struct fid {
#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
/**
* struct export_operations - for nfsd to communicate with file systems
@ -305,6 +306,7 @@ static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
struct fid *fid, int fh_len,
int fileid_type,
unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,

View File

@ -660,9 +660,13 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
struct timespec64 __i_atime;
struct timespec64 __i_mtime;
struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
time64_t i_atime_sec;
time64_t i_mtime_sec;
time64_t i_ctime_sec;
u32 i_atime_nsec;
u32 i_mtime_nsec;
u32 i_ctime_nsec;
u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
@ -719,10 +723,10 @@ struct inode {
unsigned i_dir_seq;
};
__u32 i_generation;
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
/* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
@ -1538,23 +1542,27 @@ struct timespec64 inode_set_ctime_current(struct inode *inode);
static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
return inode->__i_atime.tv_sec;
return inode->i_atime_sec;
}
static inline long inode_get_atime_nsec(const struct inode *inode)
{
return inode->__i_atime.tv_nsec;
return inode->i_atime_nsec;
}
static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
return inode->__i_atime;
struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
.tv_nsec = inode_get_atime_nsec(inode) };
return ts;
}
static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
struct timespec64 ts)
{
inode->__i_atime = ts;
inode->i_atime_sec = ts.tv_sec;
inode->i_atime_nsec = ts.tv_nsec;
return ts;
}
@ -1563,28 +1571,32 @@ static inline struct timespec64 inode_set_atime(struct inode *inode,
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
return inode_set_atime_to_ts(inode, ts);
}
static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
return inode->__i_mtime.tv_sec;
return inode->i_mtime_sec;
}
static inline long inode_get_mtime_nsec(const struct inode *inode)
{
return inode->__i_mtime.tv_nsec;
return inode->i_mtime_nsec;
}
static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
return inode->__i_mtime;
struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
.tv_nsec = inode_get_mtime_nsec(inode) };
return ts;
}
static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
struct timespec64 ts)
{
inode->__i_mtime = ts;
inode->i_mtime_sec = ts.tv_sec;
inode->i_mtime_nsec = ts.tv_nsec;
return ts;
}
@ -1598,23 +1610,27 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode,
static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
return inode->__i_ctime.tv_sec;
return inode->i_ctime_sec;
}
static inline long inode_get_ctime_nsec(const struct inode *inode)
{
return inode->__i_ctime.tv_nsec;
return inode->i_ctime_nsec;
}
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
return inode->__i_ctime;
struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
.tv_nsec = inode_get_ctime_nsec(inode) };
return ts;
}
static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
struct timespec64 ts)
{
inode->__i_ctime = ts;
inode->i_ctime_sec = ts.tv_sec;
inode->i_ctime_nsec = ts.tv_nsec;
return ts;
}
@ -1926,6 +1942,8 @@ void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
extern bool may_open_dev(const struct path *path);
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode);
bool in_group_or_capable(struct mnt_idmap *idmap,
const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
@ -2685,7 +2703,7 @@ static inline struct file *file_clone_open(struct file *file)
}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
@ -3436,20 +3454,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
return 0;
}
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
/*
* Don't strictly need d_lock here? If the parent ino could change
* then surely we'd have a deeper race in the caller?
*/
spin_lock(&dentry->d_lock);
res = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lock);
return res;
}
/* Transaction based IO helpers */
/*
@ -3574,7 +3578,7 @@ static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
parent_ino(file->f_path.dentry), DT_DIR);
d_parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@ -3613,4 +3617,21 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
static inline bool vfs_empty_path(int dfd, const char __user *path)
{
char c;
if (dfd < 0)
return false;
/* We now allow NULL to be used for empty path. */
if (!path)
return true;
if (unlikely(get_user(c, path)))
return false;
return !c;
}
#endif /* _LINUX_FS_H */
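
To see why discrete sec/nsec fields shrink struct inode (the timespec64 item
under Features), here is a standalone mock. The layout is heavily simplified,
so the absolute numbers differ from the 4 bytes saved in the real struct
inode, but the packing effect is the same.

#include <stdint.h>
#include <stdio.h>

/* Roughly timespec64 on 64-bit: s64 tv_sec plus a long tv_nsec. */
struct ts64 { int64_t tv_sec; long tv_nsec; };

struct inode_layout_old {
        struct ts64 atime, mtime, ctime;        /* 3 x 16 bytes */
        uint32_t i_generation;
};

struct inode_layout_new {
        int64_t atime_sec, mtime_sec, ctime_sec;        /* 3 x 8 bytes */
        uint32_t atime_nsec, mtime_nsec, ctime_nsec;    /* 3 x 4 bytes */
        uint32_t i_generation;                          /* fills the remaining slot */
};

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct inode_layout_old),
               sizeof(struct inode_layout_new));
        return 0;
}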

View File

@ -50,13 +50,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
extern int path_pts(struct path *path);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
struct path *path)
{
return user_path_at_empty(dfd, name, flags, path, NULL);
}
extern int user_path_at(int, const char __user *, unsigned, struct path *);
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct dentry *base,

View File

@ -37,8 +37,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sx->flags = READ_ONCE(sqe->statx_flags);
sx->filename = getname_flags(path,
getname_statx_lookup_flags(sx->flags),
NULL);
getname_statx_lookup_flags(sx->flags));
if (IS_ERR(sx->filename)) {
int ret = PTR_ERR(sx->filename);

View File

@ -96,7 +96,7 @@ int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
ix->filename = getname_flags(path, LOOKUP_FOLLOW);
if (IS_ERR(ix->filename)) {
ret = PTR_ERR(ix->filename);
ix->filename = NULL;
@ -189,7 +189,7 @@ int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
ix->filename = getname_flags(path, LOOKUP_FOLLOW);
if (IS_ERR(ix->filename)) {
ret = PTR_ERR(ix->filename);
ix->filename = NULL;

View File

@ -903,7 +903,8 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
audit_mq_open(oflag, mode, attr);
if (IS_ERR(name = getname(u_name)))
name = getname(u_name);
if (IS_ERR(name))
return PTR_ERR(name);
fd = get_unused_fd_flags(O_CLOEXEC);

View File

@ -616,12 +616,6 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
exe_file = get_mm_exe_file(oldmm);
RCU_INIT_POINTER(mm->exe_file, exe_file);
/*
* We depend on the oldmm having properly denied write access to the
* exe_file already.
*/
if (exe_file && deny_write_access(exe_file))
pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
#ifdef CONFIG_MMU
@ -1412,20 +1406,11 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
*/
old_exe_file = rcu_dereference_raw(mm->exe_file);
if (new_exe_file) {
/*
* We expect the caller (i.e., sys_execve) to already denied
* write access, so this is unlikely to fail.
*/
if (unlikely(deny_write_access(new_exe_file)))
return -EACCES;
if (new_exe_file)
get_file(new_exe_file);
}
rcu_assign_pointer(mm->exe_file, new_exe_file);
if (old_exe_file) {
allow_write_access(old_exe_file);
if (old_exe_file)
fput(old_exe_file);
}
return 0;
}
@ -1464,9 +1449,6 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
return ret;
}
ret = deny_write_access(new_exe_file);
if (ret)
return -EACCES;
get_file(new_exe_file);
/* set the new file */
@ -1475,10 +1457,8 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
rcu_assign_pointer(mm->exe_file, new_exe_file);
mmap_write_unlock(mm);
if (old_exe_file) {
allow_write_access(old_exe_file);
if (old_exe_file)
fput(old_exe_file);
}
return 0;
}
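
A quick behavioural probe of the i_writecount change in the exec and fork
hunks above: previously, opening the binary of a running process for writing
failed with ETXTBSY because the mm's exe_file held a write-access denial;
with deny_write_access() gone the open is expected to succeed (file
permissions permitting). /proc/self/exe is only a convenient path to the
calling binary; nothing is written.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/self/exe", O_WRONLY);

        if (fd < 0) {
                printf("open for write failed: %s\n", strerror(errno));
        } else {
                printf("open for write succeeded (no ETXTBSY)\n");
                close(fd);
        }
        return 0;
}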

View File

@ -2000,9 +2000,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
if (!is_shmem) {
filemap_nr_thps_inc(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to ensure
* i_writecount is up to date and the update to nr_thps is
* visible. Ensures the page cache will be truncated if the
* Paired with the fence in do_dentry_open() -> get_write_access()
* to ensure i_writecount is up to date and the update to nr_thps
* is visible. Ensures the page cache will be truncated if the
* file is opened writable.
*/
smp_mb();
@ -2190,8 +2190,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
if (!is_shmem && result == SCAN_COPY_MC) {
filemap_nr_thps_dec(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to
* ensure the update to nr_thps is visible.
* Paired with the fence in do_dentry_open() -> get_write_access()
* to ensure the update to nr_thps is visible.
*/
smp_mb();
}

View File

@ -3177,10 +3177,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
struct folio *folio;
/*
* Good, the fallocate(2) manpage permits EINTR: we may have
* been interrupted because we are using up too much memory.
* Check for fatal signal so that we abort early in OOM
* situations. We don't want to abort in case of non-fatal
* signals as large fallocate can take noticeable time and
* e.g. periodic timers may result in fallocate constantly
* restarting.
*/
if (signal_pending(current))
if (fatal_signal_pending(current))
error = -EINTR;
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
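
Finally, a rough probe of the tmpfs change above, assuming /dev/shm is a
tmpfs mount with room for the allocation: arm a fast periodic timer without
SA_RESTART and fallocate a large range. With the old signal_pending() check
the call kept returning EINTR; with fatal_signal_pending() it should run to
completion despite the timer.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static void on_alarm(int sig)
{
        (void)sig;              /* only here to interrupt the syscall */
}

int main(void)
{
        struct sigaction sa = { .sa_handler = on_alarm };       /* no SA_RESTART */
        struct itimerval it = {
                .it_interval = { .tv_usec = 1000 },
                .it_value    = { .tv_usec = 1000 },
        };
        int fd = open("/dev/shm/falloc-eintr-test",
                      O_RDWR | O_CREAT | O_TRUNC, 0600);

        if (fd < 0)
                return 1;
        sigaction(SIGALRM, &sa, NULL);
        setitimer(ITIMER_REAL, &it, NULL);

        if (fallocate(fd, 0, 0, 1LL << 30) != 0)        /* 1 GiB of tmpfs pages */
                printf("fallocate: %s\n", strerror(errno));
        else
                printf("fallocate completed despite the timer\n");

        unlink("/dev/shm/falloc-eintr-test");
        close(fd);
        return 0;
}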