linux-stable/fs/f2fs/verity.c
Linus Torvalds 79952bdcbc f2fs-6.12-rc1

Merge tag 'f2fs-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "The main changes include converting major IO paths to use folio, and
  adding various knobs to control GC more flexibly for Zoned devices.

  In addition, there are several patches to address corner cases of
  atomic file operations and better support for file pinning on zoned
  device.

  Enhancement:
   - add knobs to tune foreground/background GCs for Zoned devices
   - convert IO paths to use folio
   - reduce expensive checkpoint trigger frequency
   - allow F2FS_IPU_NOCACHE for pinned file
   - forcibly migrate to secure space for zoned device file pinning
   - get rid of buffer_head use
   - add write priority option based on zone UFS
   - get rid of online repair on corrupted directory

  Bug fixes:
   - fix to don't panic system for no free segment fault injection
   - fix to don't set SB_RDONLY in f2fs_handle_critical_error()
   - avoid unused block when dio write in LFS mode
   - compress: don't redirty sparse cluster during {,de}compress
   - check discard support for conventional zones
   - atomic: prevent atomic file from being dirtied before commit
   - atomic: fix to check atomic_file in f2fs ioctl interfaces
   - atomic: fix to forbid dio in atomic_file
   - atomic: fix to truncate pagecache before on-disk metadata truncation
   - atomic: create COW inode from parent dentry
   - atomic: fix to avoid racing w/ GC
   - atomic: require FMODE_WRITE for atomic write ioctls
   - fix to wait page writeback before setting gcing flag
   - fix to avoid racing in between read and OPU dio write, dio completion
   - fix several potential integer overflows in file offsets and dir_block_index
   - fix to avoid use-after-free in f2fs_stop_gc_thread()

  As usual, there are several code clean-ups and refactorings"

* tag 'f2fs-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (60 commits)
  f2fs: allow F2FS_IPU_NOCACHE for pinned file
  f2fs: forcibly migrate to secure space for zoned device file pinning
  f2fs: remove unused parameters
  f2fs: fix to don't panic system for no free segment fault injection
  f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error()
  f2fs: add valid block ratio not to do excessive GC for one time GC
  f2fs: create gc_no_zoned_gc_percent and gc_boost_zoned_gc_percent
  f2fs: do FG_GC when GC boosting is required for zoned devices
  f2fs: increase BG GC migration window granularity when boosted for zoned devices
  f2fs: add reserved_segments sysfs node
  f2fs: introduce migration_window_granularity
  f2fs: make BG GC more aggressive for zoned devices
  f2fs: avoid unused block when dio write in LFS mode
  f2fs: fix to check atomic_file in f2fs ioctl interfaces
  f2fs: get rid of online repaire on corrupted directory
  f2fs: prevent atomic file from being dirtied before commit
  f2fs: get rid of page->index
  f2fs: convert read_node_page() to use folio
  f2fs: convert __write_node_page() to use folio
  f2fs: convert f2fs_write_data_page() to use folio
  ...
2024-09-24 15:12:38 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/verity.c: fs-verity support for f2fs
 *
 * Copyright 2019 Google LLC
 */

/*
 * Implementation of fsverity_operations for f2fs.
 *
 * Like ext4, f2fs stores the verity metadata (Merkle tree and
 * fsverity_descriptor) past the end of the file, starting at the first 64K
 * boundary beyond i_size. This approach works because (a) verity files are
 * readonly, and (b) pages fully beyond i_size aren't visible to userspace but
 * can be read/written internally by f2fs with only some relatively small
 * changes to f2fs. Extended attributes cannot be used because (a) f2fs limits
 * the total size of an inode's xattr entries to 4096 bytes, which wouldn't be
 * enough for even a single Merkle tree block, and (b) f2fs encryption doesn't
 * encrypt xattrs, yet the verity metadata *must* be encrypted when the file is
 * because it contains hashes of the plaintext data.
 *
 * Using a 64K boundary rather than a 4K one keeps things ready for
 * architectures with 64K pages, and it doesn't necessarily waste space on-disk
 * since there can be a hole between i_size and the start of the Merkle tree.
 */
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "xattr.h"

#define F2FS_VERIFY_VER	(1)

static inline loff_t f2fs_verity_metadata_pos(const struct inode *inode)
{
	return round_up(inode->i_size, 65536);
}
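
/*
 * Illustrative example (not part of the original source): for a file with
 * i_size == 70000 bytes, the verity metadata begins at
 * round_up(70000, 65536) == 131072, i.e. the first 64K boundary past EOF.
 * The range between i_size and that boundary is typically left as a hole
 * on disk, so the rounding does not necessarily cost any space.
 */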
/*
 * Read some verity metadata from the inode. __vfs_read() can't be used because
 * we need to read beyond i_size.
 */
static int pagecache_read(struct inode *inode, void *buf, size_t count,
			  loff_t pos)
{
	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;

		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
					 NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy_from_page(buf, page, offset_in_page(pos), n);

		put_page(page);

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}
/*
 * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
 * kernel_write() can't be used because the file descriptor is readonly.
 */
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
			   loff_t pos)
{
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;

	if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
		return -EFBIG;

	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct folio *folio;
		void *fsdata = NULL;
		int res;

		res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
		if (res)
			return res;

		memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);

		res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
		if (res < 0)
			return res;
		if (res != n)
			return -EIO;

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}
/*
 * Format of f2fs verity xattr. This points to the location of the verity
 * descriptor within the file data rather than containing it directly because
 * the verity descriptor *must* be encrypted when f2fs encryption is used. But,
 * f2fs encryption does not encrypt xattrs.
 */
struct fsverity_descriptor_location {
	__le32 version;
	__le32 size;
	__le64 pos;
};
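
/*
 * Illustrative note (not part of the original source): the value stored under
 * the verity xattr is this 16-byte little-endian structure. As filled in by
 * f2fs_end_enable_verity() below, version is F2FS_VERIFY_VER (1), size is the
 * byte length of the fsverity_descriptor, and pos is the descriptor's absolute
 * file offset, i.e. f2fs_verity_metadata_pos(inode) + merkle_tree_size.
 */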
static int f2fs_begin_enable_verity(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int err;

	if (f2fs_verity_in_progress(inode))
		return -EBUSY;

	if (f2fs_is_atomic_file(inode))
		return -EOPNOTSUPP;

	/*
	 * Since the file was opened readonly, we have to initialize the quotas
	 * here and not rely on ->open() doing it. This must be done before
	 * evicting the inline data.
	 */
	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	set_inode_flag(inode, FI_VERITY_IN_PROGRESS);
	return 0;
}
static int f2fs_end_enable_verity(struct file *filp, const void *desc,
				  size_t desc_size, u64 merkle_tree_size)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
	struct fsverity_descriptor_location dloc = {
		.version = cpu_to_le32(F2FS_VERIFY_VER),
		.size = cpu_to_le32(desc_size),
		.pos = cpu_to_le64(desc_pos),
	};
	int err = 0, err2 = 0;

	/*
	 * If an error already occurred (which fs/verity/ signals by passing
	 * desc == NULL), then only clean-up is needed.
	 */
	if (desc == NULL)
		goto cleanup;

	/* Append the verity descriptor. */
	err = pagecache_write(inode, desc, desc_size, desc_pos);
	if (err)
		goto cleanup;

	/*
	 * Write all pages (both data and verity metadata). Note that this must
	 * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
	 * i_size won't be written properly. For crash consistency, this also
	 * must happen before the verity inode flag gets persisted.
	 */
	err = filemap_write_and_wait(inode->i_mapping);
	if (err)
		goto cleanup;

	/* Set the verity xattr. */
	err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
			    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
			    NULL, XATTR_CREATE);
	if (err)
		goto cleanup;

	/* Finally, set the verity inode flag. */
	file_set_verity(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
	return 0;

cleanup:
	/*
	 * Verity failed to be enabled, so clean up by truncating any verity
	 * metadata that was written beyond i_size (both from cache and from
	 * disk) and clearing FI_VERITY_IN_PROGRESS.
	 *
	 * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
	 * from re-instantiating cached pages we are truncating (since unlike
	 * normal file accesses, garbage collection isn't limited by i_size).
	 */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	err2 = f2fs_truncate(inode);
	if (err2) {
		f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
			 err2);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
	return err ?: err2;
}
static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
				      size_t buf_size)
{
	struct fsverity_descriptor_location dloc;
	int res;
	u32 size;
	u64 pos;

	/* Get the descriptor location */
	res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_VERITY,
			    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), NULL);
	if (res < 0 && res != -ERANGE)
		return res;
	if (res != sizeof(dloc) || dloc.version != cpu_to_le32(F2FS_VERIFY_VER)) {
		f2fs_warn(F2FS_I_SB(inode), "unknown verity xattr format");
		return -EINVAL;
	}
	size = le32_to_cpu(dloc.size);
	pos = le64_to_cpu(dloc.pos);

	/* Get the descriptor */
	if (pos + size < pos ||
	    pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
	    pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
		f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
		f2fs_handle_error(F2FS_I_SB(inode),
				  ERROR_CORRUPTED_VERITY_XATTR);
		return -EFSCORRUPTED;
	}
	if (buf_size) {
		if (size > buf_size)
			return -ERANGE;
		res = pagecache_read(inode, buf, size, pos);
		if (res)
			return res;
	}
	return size;
}
static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
	struct folio *folio;

	index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;

	folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
		DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);

		if (!IS_ERR(folio))
			folio_put(folio);
		else if (num_ra_pages > 1)
			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
		folio = read_mapping_folio(inode->i_mapping, index, NULL);
		if (IS_ERR(folio))
			return ERR_CAST(folio);
	}
	return folio_file_page(folio, index);
}
static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
					u64 pos, unsigned int size)
{
	pos += f2fs_verity_metadata_pos(inode);

	return pagecache_write(inode, buf, size, pos);
}
const struct fsverity_operations f2fs_verityops = {
	.begin_enable_verity = f2fs_begin_enable_verity,
	.end_enable_verity = f2fs_end_enable_verity,
	.get_verity_descriptor = f2fs_get_verity_descriptor,
	.read_merkle_tree_page = f2fs_read_merkle_tree_page,
	.write_merkle_tree_block = f2fs_write_merkle_tree_block,
};
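
For context, f2fs_begin_enable_verity() and f2fs_end_enable_verity() above are invoked by fs/verity/ when userspace enables verity on a file through the FS_IOC_ENABLE_VERITY ioctl. The sketch below is illustrative only and is not part of verity.c; it assumes a regular file on an f2fs filesystem created with the verity feature, and it uses only the standard fs-verity UAPI from <linux/fsverity.h>.

/* Minimal userspace sketch: enable fs-verity on an existing file. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int main(int argc, char **argv)
{
	struct fsverity_enable_arg arg;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	/* FS_IOC_ENABLE_VERITY requires a read-only file descriptor. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&arg, 0, sizeof(arg));
	arg.version = 1;
	arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
	arg.block_size = 4096;	/* must be a block size the filesystem supports */

	if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
		perror("FS_IOC_ENABLE_VERITY");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}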