ntfs3 changes for 6.11-rc1

Added:
     simple fileattr has been implemented.
 Fixed:
     transform resident to nonresident for compressed files;
     the format of the "nocase" mount option;
     getting file type;
     many other internal bugs.
 Refactored:
     unused function and macros have been removed;
     partial transition from page to folio (suggested by Matthew Wilcox);
     legacy ntfs support.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEh0DEKNP0I9IjwfWEqbAzH4MkB7YFAmaeJa0ACgkQqbAzH4Mk
 B7b0AQ/7BboEmiLXjgstyF6ZQcjT3NamN09OLOTu2SMlGhqcOjIn1xNNMCJPRdVR
 KJ+/LG1jYMqdyjsR7n5jt8Bxn4njLInPD3+X89NcKPVJxWFYQvg/5pzZw5YF3XGA
 G9f0ky1nvSrTLCTWuGktfkFJ5xBg9J7qkvnjiUsXUtPrf6SS2hH5z2uVbKMOfua9
 xSoe76hrxVxfpYTfk2ERtS8b1gcQUnT/JrAO88yboNQ8i4IVqdxYUklhHoXZnUHr
 YZHcg04SD9+oqrK5eR2xx2belBEVPVoD0BJZkLQyNNqBfVwNzPXc9fkkpyc9Co5R
 duRyXPzc9qYkFVCsGY9N73F+2lPmruGcz5TYgn1EOZUb00dOdZd2LF/J96lRmwpC
 hICtwbNgMWp2a9jAC1G0lNhmvHHiFZgR5o7N3YmkMUaCCcT1Z3jeCCQ2JGE0cNCf
 lwaMda6jxl1f7QLUP53qlgTb+RlVbVpPFD0JQ7mEeZyRuZepgsk6xkHQ2G18nicQ
 hmui6t+tQYMgoosjOAy8rlTtcK5A3gKUbYyeStCUNVANEM6XPUpjF0lRQuwN2bae
 p/nTjLBESp6ulq1MIFZ2h6G3OlX+vkezHULo80QWfoIEuMtkZ+MyypmNUDqlBE5n
 pDyE8bcqCVI4ODGzIj8aoZSDggQYM49SqDzmoNc7n2hhFkS8Drk=
 =sA5e
 -----END PGP SIGNATURE-----

Merge tag 'ntfs3_for_6.11' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:
 "New code:
   - simple fileattr support

  Fixes:
   - transform resident to nonresident for compressed files
   - the format of the "nocase" mount option
   - getting file type
   - many other internal bugs

  Refactoring:
   - remove unused functions and macros
   - partial transition from page to folio (suggested by Matthew Wilcox)
   - legacy ntfs support"
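
The fileattr_get/fileattr_set hooks added in this pull back the generic
FS_IOC_GETFLAGS / FS_IOC_SETFLAGS ioctls (what lsattr(1) and chattr(1)
use), so the new support is reachable from ordinary userspace. A minimal
sketch, assuming a file on an ntfs3 mount at the hypothetical path
/mnt/ntfs/file; ntfs3 reports FS_IMMUTABLE_FL, FS_APPEND_FL, FS_COMPR_FL
and FS_ENCRYPT_FL, and accepts only the first two on set:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/mnt/ntfs/file", O_RDONLY);	/* example path */
	int flags, newflags;

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return 1;

	printf("immutable=%d append=%d compressed=%d encrypted=%d\n",
	       !!(flags & FS_IMMUTABLE_FL), !!(flags & FS_APPEND_FL),
	       !!(flags & FS_COMPR_FL), !!(flags & FS_ENCRYPT_FL));

	/* ntfs3 accepts only these two bits on set; changing them also
	 * requires CAP_LINUX_IMMUTABLE. */
	newflags = (flags & (FS_IMMUTABLE_FL | FS_APPEND_FL)) | FS_APPEND_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &newflags) < 0)
		perror("FS_IOC_SETFLAGS");
	return 0;
}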

* tag 'ntfs3_for_6.11' of https://github.com/Paragon-Software-Group/linux-ntfs3: (42 commits)
  fs/ntfs3: Fix formatting, change comments, renaming
  fs/ntfs3: Update log->page_{mask,bits} if log->page_size changed
  fs/ntfs3: Implement simple fileattr
  fs/ntfs3: Redesign legacy ntfs support
  fs/ntfs3: Use function file_inode to get inode from file
  fs/ntfs3: Minor ntfs_list_ea refactoring
  fs/ntfs3: Check more cases when directory is corrupted
  fs/ntfs3: Do copy_to_user out of run_lock
  fs/ntfs3: Keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP
  fs/ntfs3: Missed error return
  fs/ntfs3: Fix the format of the "nocase" mount option
  fs/ntfs3: Fix field-spanning write in INDEX_HDR
  ntfs3: Convert attr_wof_frame_info() to use a folio
  ntfs3: Convert ni_readpage_cmpr() to take a folio
  ntfs3: Convert ntfs_get_frame_pages() to use a folio
  ntfs3: Remove calls to set/clear the error flag
  ntfs3: Convert attr_make_nonresident to use a folio
  ntfs3: Convert attr_data_write_resident to use a folio
  ntfs3: Convert ntfs_write_end() to work on a folio
  ntfs3: Convert attr_data_read_resident() to take a folio
  ...
Linus Torvalds 2024-07-22 10:50:18 -07:00
commit 5ea6d72489
14 changed files with 479 additions and 308 deletions


@ -231,7 +231,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ntfs_sb_info *sbi;
struct ATTRIB *attr_s;
struct MFT_REC *rec;
u32 used, asize, rsize, aoff, align;
u32 used, asize, rsize, aoff;
bool is_data;
CLST len, alen;
char *next;
@ -252,10 +252,13 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
rsize = le32_to_cpu(attr->res.data_size);
is_data = attr->type == ATTR_DATA && !attr->name_len;
align = sbi->cluster_size;
if (is_attr_compressed(attr))
align <<= COMPRESSION_UNIT;
len = (rsize + align - 1) >> sbi->cluster_bits;
/* len - how many clusters required to store 'rsize' bytes */
if (is_attr_compressed(attr)) {
u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
} else {
len = bytes_to_cluster(sbi, rsize);
}
run_init(run);
@ -285,22 +288,21 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
if (err)
goto out2;
} else if (!page) {
char *kaddr;
struct address_space *mapping = ni->vfs_inode.i_mapping;
struct folio *folio;
page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
if (!page) {
err = -ENOMEM;
folio = __filemap_get_folio(
mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
err = PTR_ERR(folio);
goto out2;
}
kaddr = kmap_atomic(page);
memcpy(kaddr, data, rsize);
memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page);
put_page(page);
folio_fill_tail(folio, 0, data, rsize);
folio_mark_uptodate(folio);
folio_mark_dirty(folio);
folio_unlock(folio);
folio_put(folio);
}
}
@ -670,7 +672,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
goto undo_2;
}
if (!is_mft)
/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
if (ni->mi.rno != MFT_REC_MFT)
run_truncate_head(run, evcn + 1);
svcn = le64_to_cpu(attr->nres.svcn);
@ -972,6 +975,19 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
if (err)
goto out;
/* Check for compressed frame. */
err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint);
if (err)
goto out;
if (hint) {
/* if frame is compressed - don't touch it. */
*lcn = COMPRESSED_LCN;
*len = hint;
err = -EOPNOTSUPP;
goto out;
}
if (!*len) {
if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
if (*lcn != SPARSE_LCN || !new)
@ -1223,11 +1239,12 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
goto out;
}
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
{
u64 vbo;
struct ATTRIB *attr;
u32 data_size;
size_t len;
attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
if (!attr)
@ -1236,30 +1253,20 @@ int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
if (attr->non_res)
return E_NTFS_NONRESIDENT;
vbo = page->index << PAGE_SHIFT;
vbo = folio->index << PAGE_SHIFT;
data_size = le32_to_cpu(attr->res.data_size);
if (vbo < data_size) {
const char *data = resident_data(attr);
char *kaddr = kmap_atomic(page);
u32 use = data_size - vbo;
if (vbo > data_size)
len = 0;
else
len = min(data_size - vbo, folio_size(folio));
if (use > PAGE_SIZE)
use = PAGE_SIZE;
memcpy(kaddr, data + vbo, use);
memset(kaddr + use, 0, PAGE_SIZE - use);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
} else if (!PageUptodate(page)) {
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
}
folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
folio_mark_uptodate(folio);
return 0;
}
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
{
u64 vbo;
struct mft_inode *mi;
@ -1275,17 +1282,13 @@ int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
return E_NTFS_NONRESIDENT;
}
vbo = page->index << PAGE_SHIFT;
vbo = folio->index << PAGE_SHIFT;
data_size = le32_to_cpu(attr->res.data_size);
if (vbo < data_size) {
char *data = resident_data(attr);
char *kaddr = kmap_atomic(page);
u32 use = data_size - vbo;
size_t len = min(data_size - vbo, folio_size(folio));
if (use > PAGE_SIZE)
use = PAGE_SIZE;
memcpy(data + vbo, kaddr, use);
kunmap_atomic(kaddr);
memcpy_from_folio(data + vbo, folio, 0, len);
mi->dirty = true;
}
ni->i_valid = data_size;
@ -1378,7 +1381,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
u32 voff;
u8 bytes_per_off;
char *addr;
struct page *page;
struct folio *folio;
int i, err;
__le32 *off32;
__le64 *off64;
@ -1423,18 +1426,18 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
wof_size = le64_to_cpu(attr->nres.data_size);
down_write(&ni->file.run_lock);
page = ni->file.offs_page;
if (!page) {
page = alloc_page(GFP_KERNEL);
if (!page) {
folio = ni->file.offs_folio;
if (!folio) {
folio = folio_alloc(GFP_KERNEL, 0);
if (!folio) {
err = -ENOMEM;
goto out;
}
page->index = -1;
ni->file.offs_page = page;
folio->index = -1;
ni->file.offs_folio = folio;
}
lock_page(page);
addr = page_address(page);
folio_lock(folio);
addr = folio_address(folio);
if (vbo[1]) {
voff = vbo[1] & (PAGE_SIZE - 1);
@ -1450,7 +1453,8 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
do {
pgoff_t index = vbo[i] >> PAGE_SHIFT;
if (index != page->index) {
if (index != folio->index) {
struct page *page = &folio->page;
u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
u64 to = min(from + PAGE_SIZE, wof_size);
@ -1463,10 +1467,10 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
err = ntfs_bio_pages(sbi, run, &page, 1, from,
to - from, REQ_OP_READ);
if (err) {
page->index = -1;
folio->index = -1;
goto out1;
}
page->index = index;
folio->index = index;
}
if (i) {
@ -1504,7 +1508,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
*ondisk_size = off[1] - off[0];
out1:
unlock_page(page);
folio_unlock(folio);
out:
up_write(&ni->file.run_lock);
return err;
@ -1722,6 +1726,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
attr_b->nres.total_size = cpu_to_le64(total_size);
inode_set_bytes(&ni->vfs_inode, total_size);
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mi_b->dirty = true;
mark_inode_dirty(&ni->vfs_inode);
@ -2356,8 +2361,13 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
}
if (vbo > data_size) {
/* Insert range after the file size is not allowed. */
if (vbo >= data_size) {
/*
* Insert range after the file size is not allowed.
* If the offset is equal to or greater than the end of
* file, an error is returned. For such operations (i.e., inserting
* a hole at the end of file), ftruncate(2) should be used.
*/
return -EINVAL;
}
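
For the compressed case above, attr_make_nonresident() now rounds the
resident size up to a whole compression unit (1 << NTFS_LZNT_CUNIT
clusters) rather than a single cluster. A small standalone sketch of that
arithmetic, using assumed example values (4K clusters, NTFS_LZNT_CUNIT ==
4, rsize == 5000 bytes), not data from a real volume:

#include <stdio.h>

#define NTFS_LZNT_CUNIT 4	/* compression unit: 16 clusters */

int main(void)
{
	unsigned int cluster_bits = 12;		/* assumed 4K cluster */
	unsigned int rsize = 5000;		/* assumed resident size */
	unsigned char shift = cluster_bits + NTFS_LZNT_CUNIT; /* 16 -> 64K frame */
	unsigned long long len;

	/* Round rsize up to a whole frame, then express it in clusters. */
	len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;

	printf("clusters to allocate: %llu\n", len);	/* prints 16 */
	return 0;
}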


@ -1382,7 +1382,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
if (err)
break;
return err;
bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
if (!bh)


@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
}
static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
const struct NTFS_DE *e, u8 *name,
struct dir_context *ctx)
/*
* returns false if 'ctx' if full
*/
static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
struct ntfs_inode *ni, const struct NTFS_DE *e,
u8 *name, struct dir_context *ctx)
{
const struct ATTR_FILE_NAME *fname;
unsigned long ino;
@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
fname = Add2Ptr(e, sizeof(struct NTFS_DE));
if (fname->type == FILE_NAME_DOS)
return 0;
return true;
if (!mi_is_ref(&ni->mi, &fname->home))
return 0;
return true;
ino = ino_get(&e->ref);
if (ino == MFT_REC_ROOT)
return 0;
return true;
/* Skip meta files. Unless option to show metafiles is set. */
if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
return 0;
return true;
if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
return 0;
return true;
name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
PATH_MAX);
if (name_len <= 0) {
ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
ino);
return 0;
return true;
}
/*
@ -326,7 +329,8 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
* It does additional locks/reads just to get the type of name.
* Should we use additional mount option to enable branch below?
*/
if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) ||
fname->dup.ea_size) &&
ino != ni->mi.rno) {
struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
if (!IS_ERR_OR_NULL(inode)) {
@ -335,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
}
}
return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
}
/*
* ntfs_read_hdr - Helper function for ntfs_readdir().
*
* returns 0 if ok.
* returns -EINVAL if directory is corrupted.
* returns +1 if 'ctx' is full.
*/
static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
u8 *name, struct dir_context *ctx)
{
int err;
const struct NTFS_DE *e;
u32 e_size;
u32 end = le32_to_cpu(hdr->used);
@ -353,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
for (;; off += e_size) {
if (off + sizeof(struct NTFS_DE) > end)
return -1;
return -EINVAL;
e = Add2Ptr(hdr, off);
e_size = le16_to_cpu(e->size);
if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
return -1;
return -EINVAL;
if (de_is_last(e))
return 0;
@ -368,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
continue;
if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
return -1;
return -EINVAL;
ctx->pos = vbo + off;
/* Submit the name to the filldir callback. */
err = ntfs_filldir(sbi, ni, e, name, ctx);
if (err)
return err;
if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) {
/* ctx is full. */
return +1;
}
}
}
@ -474,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
vbo = (u64)bit << index_bits;
if (vbo >= i_size) {
ntfs_inode_err(dir, "Looks like your dir is corrupt");
ctx->pos = eod;
err = -EINVAL;
goto out;
}
@ -498,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
__putname(name);
put_indx_node(node);
if (err == -ENOENT) {
if (err == 1) {
/* 'ctx' is full. */
err = 0;
} else if (err == -ENOENT) {
err = 0;
ctx->pos = pos;
} else if (err < 0) {
if (err == -EINVAL)
ntfs_inode_err(dir, "directory corrupted");
ctx->pos = eod;
}
return err;
@ -618,10 +631,12 @@ const struct file_operations ntfs_dir_operations = {
#endif
};
#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = ntfs_readdir,
.open = ntfs_file_open,
};
#endif
// clang-format on
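
The readdir rework above follows the standard dir_emit() contract: a
false return only means the getdents buffer ('ctx') is full, which is not
an error, and the entry type is now resolved for names that are reparse
points or carry extended attributes, so d_type is meaningful to callers.
A runnable userspace sketch that simply prints what readdir reports,
assuming a directory on an ntfs3 mount at the hypothetical /mnt/ntfs:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/mnt/ntfs");		/* example path */
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d)))
		printf("%-24s d_type=%u\n", de->d_name, (unsigned)de->d_type);
	closedir(d);
	return 0;
}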


@ -13,6 +13,7 @@
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>
#include "debug.h"
#include "ntfs.h"
@ -48,6 +49,65 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
return 0;
}
/*
* ntfs_fileattr_get - inode_operations::fileattr_get
*/
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
struct ntfs_inode *ni = ntfs_i(inode);
u32 flags = 0;
if (inode->i_flags & S_IMMUTABLE)
flags |= FS_IMMUTABLE_FL;
if (inode->i_flags & S_APPEND)
flags |= FS_APPEND_FL;
if (is_compressed(ni))
flags |= FS_COMPR_FL;
if (is_encrypted(ni))
flags |= FS_ENCRYPT_FL;
fileattr_fill_flags(fa, flags);
return 0;
}
/*
* ntfs_fileattr_set - inode_operations::fileattr_set
*/
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
struct fileattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 flags = fa->flags;
unsigned int new_fl = 0;
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL))
return -EOPNOTSUPP;
if (flags & FS_IMMUTABLE_FL)
new_fl |= S_IMMUTABLE;
if (flags & FS_APPEND_FL)
new_fl |= S_APPEND;
inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
}
/*
* ntfs_ioctl - file_operations::unlocked_ioctl
*/
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@ -77,20 +137,27 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct inode *inode = d_inode(path->dentry);
struct ntfs_inode *ni = ntfs_i(inode);
stat->result_mask |= STATX_BTIME;
stat->btime = ni->i_crtime;
stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
if (inode->i_flags & S_IMMUTABLE)
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (inode->i_flags & S_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (is_compressed(ni))
stat->attributes |= STATX_ATTR_COMPRESSED;
if (is_encrypted(ni))
stat->attributes |= STATX_ATTR_ENCRYPTED;
stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
generic_fillattr(idmap, request_mask, inode, stat);
stat->result_mask |= STATX_BTIME;
stat->btime = ni->i_crtime;
stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
return 0;
}
@ -196,9 +263,9 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
PAGE_SIZE;
iblock = page_off >> inode->i_blkbits;
folio = __filemap_get_folio(mapping, idx,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
folio = __filemap_get_folio(
mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
if (IS_ERR(folio))
return PTR_ERR(folio);
@ -253,8 +320,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
*/
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct inode *inode = file_inode(file);
struct ntfs_inode *ni = ntfs_i(inode);
u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
bool rw = vma->vm_flags & VM_WRITE;
@ -299,10 +365,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
if (ni->i_valid < to) {
if (!inode_trylock(inode)) {
err = -EAGAIN;
goto out;
}
inode_lock(inode);
err = ntfs_extend_initialized_size(file, ni,
ni->i_valid, to);
inode_unlock(inode);
@ -431,7 +494,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
*/
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
struct inode *inode = file->f_mapping->host;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
@ -744,7 +807,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct inode *inode = file_inode(file);
struct ntfs_inode *ni = ntfs_i(inode);
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
@ -781,7 +844,7 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct inode *inode = in->f_mapping->host;
struct inode *inode = file_inode(in);
struct ntfs_inode *ni = ntfs_i(inode);
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
@ -824,23 +887,25 @@ static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
*frame_uptodate = true;
for (npages = 0; npages < pages_per_frame; npages++, index++) {
struct page *page;
struct folio *folio;
page = find_or_create_page(mapping, index, gfp_mask);
if (!page) {
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
gfp_mask);
if (IS_ERR(folio)) {
while (npages--) {
page = pages[npages];
unlock_page(page);
put_page(page);
folio = page_folio(pages[npages]);
folio_unlock(folio);
folio_put(folio);
}
return -ENOMEM;
}
if (!PageUptodate(page))
if (!folio_test_uptodate(folio))
*frame_uptodate = false;
pages[npages] = page;
pages[npages] = &folio->page;
}
return 0;
@ -1075,8 +1140,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct inode *inode = file_inode(file);
ssize_t ret;
int err;
struct ntfs_inode *ni = ntfs_i(inode);
@ -1198,7 +1262,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
}
/*
* ntfs_fiemap - file_operations::fiemap
* ntfs_fiemap - inode_operations::fiemap
*/
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
@ -1227,6 +1291,8 @@ const struct inode_operations ntfs_file_inode_operations = {
.get_acl = ntfs_get_acl,
.set_acl = ntfs_set_acl,
.fiemap = ntfs_fiemap,
.fileattr_get = ntfs_fileattr_get,
.fileattr_set = ntfs_fileattr_set,
};
const struct file_operations ntfs_file_operations = {
@ -1246,6 +1312,7 @@ const struct file_operations ntfs_file_operations = {
.release = ntfs_file_release,
};
#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
.llseek = generic_file_llseek,
.read_iter = ntfs_file_read_iter,
@ -1253,4 +1320,5 @@ const struct file_operations ntfs_legacy_file_operations = {
.open = ntfs_file_open,
.release = ntfs_file_release,
};
#endif
// clang-format on
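
The ntfs_getattr() hunk above starts reporting STATX_ATTR_IMMUTABLE and
STATX_ATTR_APPEND next to the existing COMPRESSED/ENCRYPTED attributes.
A runnable statx(2) sketch that reads them back, again with a
hypothetical path:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/mnt/ntfs/file", 0,
		  STATX_BASIC_STATS | STATX_BTIME, &stx))
		return 1;

	printf("immutable=%d append=%d compressed=%d encrypted=%d\n",
	       !!(stx.stx_attributes & STATX_ATTR_IMMUTABLE),
	       !!(stx.stx_attributes & STATX_ATTR_APPEND),
	       !!(stx.stx_attributes & STATX_ATTR_COMPRESSED),
	       !!(stx.stx_attributes & STATX_ATTR_ENCRYPTED));
	return 0;
}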


@ -122,10 +122,10 @@ void ni_clear(struct ntfs_inode *ni)
else {
run_close(&ni->file.run);
#ifdef CONFIG_NTFS3_LZX_XPRESS
if (ni->file.offs_page) {
if (ni->file.offs_folio) {
/* On-demand allocated page for offsets. */
put_page(ni->file.offs_page);
ni->file.offs_page = NULL;
folio_put(ni->file.offs_folio);
ni->file.offs_folio = NULL;
}
#endif
}
@ -1501,7 +1501,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
if (is_ext) {
if (flags & ATTR_FLAG_COMPRESSED)
attr->nres.c_unit = COMPRESSION_UNIT;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
attr->nres.total_size = attr->nres.alloc_size;
}
@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni)
asize = le32_to_cpu(attr->size);
roff = le16_to_cpu(attr->nres.run_off);
if (roff > asize)
if (roff > asize) {
_ntfs_bad_inode(&ni->vfs_inode);
return -EINVAL;
}
/* run==1 means unpack and deallocate. */
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
@ -1896,6 +1898,47 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
return REPARSE_LINK;
}
/*
* fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent
* but it accepts kernel address for fi_extents_start
*/
static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
u64 logical, u64 phys, u64 len, u32 flags)
{
struct fiemap_extent extent;
struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
/* only count the extents */
if (fieinfo->fi_extents_max == 0) {
fieinfo->fi_extents_mapped++;
return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
return 1;
if (flags & FIEMAP_EXTENT_DELALLOC)
flags |= FIEMAP_EXTENT_UNKNOWN;
if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED)
flags |= FIEMAP_EXTENT_ENCODED;
if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE))
flags |= FIEMAP_EXTENT_NOT_ALIGNED;
memset(&extent, 0, sizeof(extent));
extent.fe_logical = logical;
extent.fe_physical = phys;
extent.fe_length = len;
extent.fe_flags = flags;
dest += fieinfo->fi_extents_mapped;
memcpy(dest, &extent, sizeof(extent));
fieinfo->fi_extents_mapped++;
if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
return 1;
return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
/*
* ni_fiemap - Helper for file_fiemap().
*
@ -1906,6 +1949,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
__u64 vbo, __u64 len)
{
int err = 0;
struct fiemap_extent __user *fe_u = fieinfo->fi_extents_start;
struct fiemap_extent *fe_k = NULL;
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
struct runs_tree *run;
@ -1953,6 +1998,18 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
goto out;
}
/*
* To avoid lock problems replace pointer to user memory by pointer to kernel memory.
*/
fe_k = kmalloc_array(fieinfo->fi_extents_max,
sizeof(struct fiemap_extent),
GFP_NOFS | __GFP_ZERO);
if (!fe_k) {
err = -ENOMEM;
goto out;
}
fieinfo->fi_extents_start = fe_k;
end = vbo + len;
alloc_size = le64_to_cpu(attr->nres.alloc_size);
if (end > alloc_size)
@ -2041,8 +2098,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
if (vbo + dlen >= end)
flags |= FIEMAP_EXTENT_LAST;
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
flags);
err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, dlen,
flags);
if (err < 0)
break;
if (err == 1) {
@ -2062,7 +2120,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
if (vbo + bytes >= end)
flags |= FIEMAP_EXTENT_LAST;
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, bytes,
flags);
if (err < 0)
break;
if (err == 1) {
@ -2075,7 +2134,19 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
up_read(run_lock);
/*
* Copy to user memory out of lock
*/
if (copy_to_user(fe_u, fe_k,
fieinfo->fi_extents_max *
sizeof(struct fiemap_extent))) {
err = -EFAULT;
}
out:
/* Restore original pointer. */
fieinfo->fi_extents_start = fe_u;
kfree(fe_k);
return err;
}
@ -2085,12 +2156,12 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
* When decompressing, we typically obtain more than one page per reference.
* We inject the additional pages into the page cache.
*/
int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
{
int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct address_space *mapping = page->mapping;
pgoff_t index = page->index;
struct address_space *mapping = folio->mapping;
pgoff_t index = folio->index;
u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
struct page **pages = NULL; /* Array of at most 16 pages. stack? */
u8 frame_bits;
@ -2100,7 +2171,8 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
struct page *pg;
if (vbo >= i_size_read(&ni->vfs_inode)) {
SetPageUptodate(page);
folio_zero_range(folio, 0, folio_size(folio));
folio_mark_uptodate(folio);
err = 0;
goto out;
}
@ -2124,7 +2196,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
goto out;
}
pages[idx] = page;
pages[idx] = &folio->page;
index = frame_vbo >> PAGE_SHIFT;
gfp_mask = mapping_gfp_mask(mapping);
@ -2143,9 +2215,6 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
out1:
if (err)
SetPageError(page);
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
if (i == idx || !pg)
@ -2157,7 +2226,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
out:
/* At this point, err contains 0 or -EIO depending on the "critical" page. */
kfree(pages);
unlock_page(page);
folio_unlock(folio);
return err;
}
@ -2362,9 +2431,9 @@ int ni_decompress_file(struct ntfs_inode *ni)
/* Clear cached flag. */
ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
if (ni->file.offs_page) {
put_page(ni->file.offs_page);
ni->file.offs_page = NULL;
if (ni->file.offs_folio) {
folio_put(ni->file.offs_folio);
ni->file.offs_folio = NULL;
}
mapping->a_ops = &ntfs_aops;
@ -2718,7 +2787,6 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
kunmap(pg);
ClearPageError(pg);
SetPageUptodate(pg);
}
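
The ni_fiemap() change above stages extents in a kernel-side array and
performs the copy_to_user() only after run_lock has been dropped; the
userspace FS_IOC_FIEMAP contract is unchanged. A runnable sketch of the
usual two-pass call (count with fm_extent_count == 0, then fetch), path
hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	int fd = open("/mnt/ntfs/file", O_RDONLY);	/* example path */
	struct fiemap probe, *fm;
	unsigned int i, n;

	if (fd < 0)
		return 1;

	/* Pass 1: fm_extent_count == 0 only counts the extents. */
	memset(&probe, 0, sizeof(probe));
	probe.fm_length = ~0ULL;
	if (ioctl(fd, FS_IOC_FIEMAP, &probe))
		return 1;
	n = probe.fm_mapped_extents;

	/* Pass 2: fetch the extent records themselves. */
	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;
	fm->fm_extent_count = n;
	if (ioctl(fd, FS_IOC_FIEMAP, fm))
		return 1;
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical=%llu physical=%llu len=%llu flags=0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	return 0;
}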


@ -724,7 +724,8 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
if (!rsize || rsize > bytes ||
rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
le16_to_cpu(rt->total) > ne || ff > ts - sizeof(__le32) ||
lf > ts - sizeof(__le32) ||
(ff && ff < sizeof(struct RESTART_TABLE)) ||
(lf && lf < sizeof(struct RESTART_TABLE))) {
return false;
@ -754,6 +755,9 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
return false;
off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
if (off > ts - sizeof(__le32))
return false;
}
return true;
@ -2992,7 +2996,7 @@ static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
if (is_ext) {
attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
if (is_attr_compressed(attr))
attr->nres.c_unit = COMPRESSION_UNIT;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
attr->nres.run_off =
cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
@ -3722,6 +3726,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
struct ATTR_NAME_ENTRY *attr_names = NULL;
u32 attr_names_bytes = 0;
u32 oatbl_bytes = 0;
struct RESTART_TABLE *dptbl = NULL;
struct RESTART_TABLE *trtbl = NULL;
const struct RESTART_TABLE *rt;
@ -3736,6 +3742,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
struct NTFS_RESTART *rst = NULL;
struct lcb *lcb = NULL;
struct OPEN_ATTR_ENRTY *oe;
struct ATTR_NAME_ENTRY *ane;
struct TRANSACTION_ENTRY *tr;
struct DIR_PAGE_ENTRY *dp;
u32 i, bytes_per_attr_entry;
@ -3915,6 +3922,9 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
goto out;
}
log->page_mask = log->page_size - 1;
log->page_bits = blksize_bits(log->page_size);
/* If the file size has shrunk then we won't mount it. */
if (log->l_size < le64_to_cpu(ra2->l_size)) {
err = -EINVAL;
@ -4104,7 +4114,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
/* Allocate and Read the Transaction Table. */
if (!rst->transact_table_len)
goto check_dirty_page_table;
goto check_dirty_page_table; /* reduce tab pressure. */
t64 = le64_to_cpu(rst->transact_table_lsn);
err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
@ -4144,7 +4154,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
check_dirty_page_table:
/* The next record back should be the Dirty Pages Table. */
if (!rst->dirty_pages_len)
goto check_attribute_names;
goto check_attribute_names; /* reduce tab pressure. */
t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
@ -4180,7 +4190,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
/* Convert Ra version '0' into version '1'. */
if (rst->major_ver)
goto end_conv_1;
goto end_conv_1; /* reduce tab pressure. */
dp = NULL;
while ((dp = enum_rstbl(dptbl, dp))) {
@ -4200,8 +4210,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
* remembering the oldest lsn values.
*/
if (sbi->cluster_size <= log->page_size)
goto trace_dp_table;
goto trace_dp_table; /* reduce tab pressure. */
dp = NULL;
while ((dp = enum_rstbl(dptbl, dp))) {
struct DIR_PAGE_ENTRY *next = dp;
@ -4222,7 +4231,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
check_attribute_names:
/* The next record should be the Attribute Names. */
if (!rst->attr_names_len)
goto check_attr_table;
goto check_attr_table; /* reduce tab pressure. */
t64 = le64_to_cpu(rst->attr_names_lsn);
err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
@ -4240,9 +4249,9 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
}
t32 = lrh_length(lrh);
rec_len -= t32;
attr_names_bytes = rec_len - t32;
attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
attr_names = kmemdup(Add2Ptr(lrh, t32), attr_names_bytes, GFP_NOFS);
if (!attr_names) {
err = -ENOMEM;
goto out;
@ -4254,7 +4263,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
check_attr_table:
/* The next record should be the attribute Table. */
if (!rst->open_attr_len)
goto check_attribute_names2;
goto check_attribute_names2; /* reduce tab pressure. */
t64 = le64_to_cpu(rst->open_attr_table_lsn);
err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
@ -4274,14 +4283,14 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
t16 = le16_to_cpu(lrh->redo_off);
rt = Add2Ptr(lrh, t16);
t32 = rec_len - t16;
oatbl_bytes = rec_len - t16;
if (!check_rstbl(rt, t32)) {
if (!check_rstbl(rt, oatbl_bytes)) {
err = -EINVAL;
goto out;
}
oatbl = kmemdup(rt, t32, GFP_NOFS);
oatbl = kmemdup(rt, oatbl_bytes, GFP_NOFS);
if (!oatbl) {
err = -ENOMEM;
goto out;
@ -4314,17 +4323,40 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
lcb = NULL;
check_attribute_names2:
if (rst->attr_names_len && oatbl) {
struct ATTR_NAME_ENTRY *ane = attr_names;
while (ane->off) {
if (attr_names && oatbl) {
off = 0;
for (;;) {
/* Check we can use attribute name entry 'ane'. */
static_assert(sizeof(*ane) == 4);
if (off + sizeof(*ane) > attr_names_bytes) {
/* just ignore the rest. */
break;
}
ane = Add2Ptr(attr_names, off);
t16 = le16_to_cpu(ane->off);
if (!t16) {
/* this is the only valid exit. */
break;
}
/* Check we can use open attribute entry 'oe'. */
if (t16 + sizeof(*oe) > oatbl_bytes) {
/* just ignore the rest. */
break;
}
/* TODO: Clear table on exit! */
oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
oe = Add2Ptr(oatbl, t16);
t16 = le16_to_cpu(ane->name_bytes);
off += t16 + sizeof(*ane);
if (off > attr_names_bytes) {
/* just ignore the rest. */
break;
}
oe->name_len = t16 / sizeof(short);
oe->ptr = ane->name;
oe->is_attr_name = 2;
ane = Add2Ptr(ane,
sizeof(struct ATTR_NAME_ENTRY) + t16);
}
}
@ -4520,7 +4552,6 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
}
}
goto next_log_record_analyze;
;
}
case OpenNonresidentAttribute:
@ -4659,7 +4690,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
* table are not empty.
*/
if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
goto end_reply;
goto end_replay;
sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
if (is_ro)
@ -5088,7 +5119,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
end_reply:
end_replay:
err = 0;
if (is_ro)
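
The attribute-names loop in log_replay() above replaces open-coded
pointer walking with explicit bounds checks before each variable-length
entry is touched. A small userspace sketch of the same pattern over a
made-up buffer of { u16 off; u16 name_bytes; name[] } records (the layout
mirrors ATTR_NAME_ENTRY, but the data is purely illustrative and
little-endian is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct name_entry {
	uint16_t off;		/* 0 terminates the list */
	uint16_t name_bytes;	/* length of the name that follows */
};

static void walk(const uint8_t *buf, size_t bytes)
{
	size_t off = 0;
	struct name_entry e;

	for (;;) {
		if (off + sizeof(e) > bytes)
			break;	/* header would overflow: ignore the rest */
		memcpy(&e, buf + off, sizeof(e));
		if (!e.off)
			break;	/* zero offset: the only normal exit */
		off += sizeof(e) + e.name_bytes;
		if (off > bytes)
			break;	/* name would overflow: ignore the rest */
		printf("entry -> table offset %u, %u name bytes\n",
		       e.off, e.name_bytes);
	}
}

int main(void)
{
	/* One 4-byte name entry followed by a terminating zero entry. */
	uint8_t buf[16] = { 0x20, 0x00, 0x04, 0x00, 'a', 0, 'b', 0 };

	walk(buf, sizeof(buf));
	return 0;
}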


@ -2650,8 +2650,8 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
{
int err;
struct ATTRIB *attr;
u32 uni_bytes;
struct ntfs_inode *ni = sbi->volume.ni;
const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
/* Allocate PATH_MAX bytes. */
struct cpu_str *uni = __getname();
@ -2663,7 +2663,8 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
if (err < 0)
goto out;
if (uni->len > max_ulen) {
uni_bytes = uni->len * sizeof(u16);
if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
ntfs_warn(sbi->sb, "new label is too long");
err = -EFBIG;
goto out;
@ -2674,13 +2675,13 @@ int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
/* Ignore any errors. */
ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
0, &attr, NULL, NULL);
err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
NULL, NULL);
if (err < 0)
goto unlock_out;
/* write new label in on-disk struct. */
memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
memcpy(resident_data(attr), uni->name, uni_bytes);
/* update cached value of current label. */
if (len >= ARRAY_SIZE(sbi->volume.label))


@ -978,7 +978,7 @@ static struct indx_node *indx_new(struct ntfs_index *indx,
hdr->used =
cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
de_set_vbn_le(e, *sub_vbn);
hdr->flags = 1;
hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
} else {
e->size = cpu_to_le16(sizeof(struct NTFS_DE));
hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
@ -1683,7 +1683,7 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
hdr->flags = 1;
hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
hdr->used = hdr->total =
cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));


@ -18,7 +18,7 @@
#include "ntfs_fs.h"
/*
* ntfs_read_mft - Read record and parses MFT.
* ntfs_read_mft - Read record and parse MFT.
*/
static struct inode *ntfs_read_mft(struct inode *inode,
const struct cpu_str *name,
@ -441,10 +441,9 @@ static struct inode *ntfs_read_mft(struct inode *inode,
* Usually a hard links to directories are disabled.
*/
inode->i_op = &ntfs_dir_inode_operations;
if (is_legacy_ntfs(inode->i_sb))
inode->i_fop = &ntfs_legacy_dir_operations;
else
inode->i_fop = &ntfs_dir_operations;
inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
&ntfs_legacy_dir_operations :
&ntfs_dir_operations;
ni->i_valid = 0;
} else if (S_ISLNK(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
@ -454,10 +453,9 @@ static struct inode *ntfs_read_mft(struct inode *inode,
} else if (S_ISREG(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_file_inode_operations;
if (is_legacy_ntfs(inode->i_sb))
inode->i_fop = &ntfs_legacy_file_operations;
else
inode->i_fop = &ntfs_file_operations;
inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
&ntfs_legacy_file_operations :
&ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
if (ino != MFT_REC_MFT)
@ -580,10 +578,11 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
bh->b_blocknr = RESIDENT_LCN;
bh->b_size = block_size;
if (!folio) {
/* direct io (read) or bmap call */
err = 0;
} else {
ni_lock(ni);
err = attr_data_read_resident(ni, &folio->page);
err = attr_data_read_resident(ni, folio);
ni_unlock(ni);
if (!err)
@ -710,25 +709,24 @@ static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
static int ntfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
int err;
struct address_space *mapping = page->mapping;
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
if (is_resident(ni)) {
ni_lock(ni);
err = attr_data_read_resident(ni, page);
err = attr_data_read_resident(ni, folio);
ni_unlock(ni);
if (err != E_NTFS_NONRESIDENT) {
unlock_page(page);
folio_unlock(folio);
return err;
}
}
if (is_compressed(ni)) {
ni_lock(ni);
err = ni_readpage_cmpr(ni, page);
err = ni_readpage_cmpr(ni, folio);
ni_unlock(ni);
return err;
}
@ -872,7 +870,7 @@ static int ntfs_resident_writepage(struct folio *folio,
return -EIO;
ni_lock(ni);
ret = attr_data_write_resident(ni, &folio->page);
ret = attr_data_write_resident(ni, folio);
ni_unlock(ni);
if (ret != E_NTFS_NONRESIDENT)
@ -914,24 +912,25 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
*pagep = NULL;
if (is_resident(ni)) {
struct page *page =
grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT);
struct folio *folio = __filemap_get_folio(
mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (!page) {
err = -ENOMEM;
if (IS_ERR(folio)) {
err = PTR_ERR(folio);
goto out;
}
ni_lock(ni);
err = attr_data_read_resident(ni, page);
err = attr_data_read_resident(ni, folio);
ni_unlock(ni);
if (!err) {
*pagep = page;
*pagep = &folio->page;
goto out;
}
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);
if (err != E_NTFS_NONRESIDENT)
goto out;
@ -950,6 +949,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct page *page, void *fsdata)
{
struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
u64 valid = ni->i_valid;
@ -958,26 +958,26 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
if (is_resident(ni)) {
ni_lock(ni);
err = attr_data_write_resident(ni, page);
err = attr_data_write_resident(ni, folio);
ni_unlock(ni);
if (!err) {
struct buffer_head *head = folio_buffers(folio);
dirty = true;
/* Clear any buffers in page. */
if (page_has_buffers(page)) {
struct buffer_head *head, *bh;
/* Clear any buffers in folio. */
if (head) {
struct buffer_head *bh = head;
bh = head = page_buffers(page);
do {
clear_buffer_dirty(bh);
clear_buffer_mapped(bh);
set_buffer_uptodate(bh);
} while (head != (bh = bh->b_this_page));
}
SetPageUptodate(page);
folio_mark_uptodate(folio);
err = copied;
}
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);
} else {
err = generic_write_end(file, mapping, pos, len, copied, page,
fsdata);
@ -1093,33 +1093,31 @@ int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
if (!ret && i2)
ret = writeback_inode(i2);
if (!ret)
ret = sync_blockdev_nowait(sb->s_bdev);
ret = filemap_flush(sb->s_bdev_file->f_mapping);
return ret;
}
int inode_write_data(struct inode *inode, const void *data, size_t bytes)
/*
* Helper function to read file.
*/
int inode_read_data(struct inode *inode, void *data, size_t bytes)
{
pgoff_t idx;
struct address_space *mapping = inode->i_mapping;
/* Write non resident data. */
for (idx = 0; bytes; idx++) {
size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
struct page *page = ntfs_map_page(inode->i_mapping, idx);
struct page *page = read_mapping_page(mapping, idx, NULL);
void *kaddr;
if (IS_ERR(page))
return PTR_ERR(page);
lock_page(page);
WARN_ON(!PageUptodate(page));
ClearPageUptodate(page);
kaddr = kmap_atomic(page);
memcpy(data, kaddr, op);
kunmap_atomic(kaddr);
memcpy(page_address(page), data, op);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
put_page(page);
bytes -= op;
data = Add2Ptr(data, PAGE_SIZE);
@ -1508,7 +1506,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
attr->flags = ATTR_FLAG_COMPRESSED;
attr->nres.c_unit = COMPRESSION_UNIT;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
asize = SIZEOF_NONRESIDENT_EX + 8;
} else {
attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
@ -1559,7 +1557,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
/*
* Below function 'ntfs_save_wsl_perm' requires 0x78 bytes.
* It is good idea to keep extened attributes resident.
* It is good idea to keep extended attributes resident.
*/
if (asize + t16 + 0x78 + 8 > sbi->record_size) {
CLST alen;
@ -1628,10 +1626,9 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
if (S_ISDIR(mode)) {
inode->i_op = &ntfs_dir_inode_operations;
if (is_legacy_ntfs(inode->i_sb))
inode->i_fop = &ntfs_legacy_dir_operations;
else
inode->i_fop = &ntfs_dir_operations;
inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
&ntfs_legacy_dir_operations :
&ntfs_dir_operations;
} else if (S_ISLNK(mode)) {
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
@ -1640,10 +1637,9 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
inode->i_op = &ntfs_file_inode_operations;
if (is_legacy_ntfs(inode->i_sb))
inode->i_fop = &ntfs_legacy_file_operations;
else
inode->i_fop = &ntfs_file_operations;
inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
&ntfs_legacy_file_operations :
&ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
init_rwsem(&ni->file.run_lock);
@ -1668,7 +1664,9 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
* The packed size of extended attribute is stored in direntry too.
* 'fname' here points to inside new_de.
*/
ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
if (err)
goto out6;
/*
* update ea_size in file_name attribute too.
@ -1712,6 +1710,12 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
goto out2;
out6:
attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL);
if (attr && attr->non_res) {
/* Delete ATTR_EA, if non-resident. */
attr_set_size(ni, ATTR_EA, NULL, 0, NULL, 0, NULL, false, NULL);
}
if (rp_inserted)
ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
@ -2133,5 +2137,6 @@ const struct address_space_operations ntfs_aops = {
const struct address_space_operations ntfs_aops_cmpr = {
.read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
.dirty_folio = block_dirty_folio,
};
// clang-format on


@ -112,9 +112,7 @@ static int ntfs_create(struct mnt_idmap *idmap, struct inode *dir,
}
/*
* ntfs_mknod
*
* inode_operations::mknod
* ntfs_mknod - inode_operations::mknod
*/
static int ntfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
@ -509,6 +507,8 @@ const struct inode_operations ntfs_dir_inode_operations = {
.getattr = ntfs_getattr,
.listxattr = ntfs_listxattr,
.fiemap = ntfs_fiemap,
.fileattr_get = ntfs_fileattr_get,
.fileattr_set = ntfs_fileattr_set,
};
const struct inode_operations ntfs_special_inode_operations = {


@ -82,9 +82,6 @@ typedef u32 CLST;
#define RESIDENT_LCN ((CLST)-2)
#define COMPRESSED_LCN ((CLST)-3)
#define COMPRESSION_UNIT 4
#define COMPRESS_MAX_CLUSTER 0x1000
enum RECORD_NUM {
MFT_REC_MFT = 0,
MFT_REC_MIRR = 1,
@ -696,14 +693,15 @@ static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
offsetof(struct ATTR_FILE_NAME, name) + \
NTFS_NAME_LEN * sizeof(short), 8)
#define NTFS_INDEX_HDR_HAS_SUBNODES cpu_to_le32(1)
struct INDEX_HDR {
__le32 de_off; // 0x00: The offset from the start of this structure
// to the first NTFS_DE.
__le32 used; // 0x04: The size of this structure plus all
// entries (quad-word aligned).
__le32 total; // 0x08: The allocated size of for this structure plus all entries.
u8 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory.
u8 res[3];
__le32 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory.
//
// de_off + used <= total
@ -751,7 +749,7 @@ static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
{
return hdr->flags & 1;
return hdr->flags & NTFS_INDEX_HDR_HAS_SUBNODES;
}
struct INDEX_BUFFER {
@ -771,7 +769,7 @@ static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
{
return !(ib->ihdr.flags & 1);
return !(ib->ihdr.flags & NTFS_INDEX_HDR_HAS_SUBNODES);
}
/* Index root structure ( 0x90 ). */
@ -1002,9 +1000,6 @@ struct REPARSE_POINT {
static_assert(sizeof(struct REPARSE_POINT) == 0x18);
/* Maximum allowed size of the reparse data. */
#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE (16 * 1024)
/*
* The value of the following constant needs to satisfy the following
* conditions:


@ -383,7 +383,7 @@ struct ntfs_inode {
struct rw_semaphore run_lock;
struct runs_tree run;
#ifdef CONFIG_NTFS3_LZX_XPRESS
struct page *offs_page;
struct folio *offs_folio;
#endif
} file;
};
@ -434,8 +434,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
struct ATTRIB **ret);
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
CLST *len, bool *new, bool zero);
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio);
int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio);
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, struct runs_tree *run,
CLST vcn);
@ -497,6 +497,9 @@ extern const struct file_operations ntfs_dir_operations;
extern const struct file_operations ntfs_legacy_dir_operations;
/* Globals from file.c */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
struct fileattr *fa);
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
@ -564,7 +567,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint);
#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
__u64 vbo, __u64 len);
int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page);
int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio);
int ni_decompress_file(struct ntfs_inode *ni);
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
u32 pages_per_frame);
@ -716,7 +719,7 @@ int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2);
int inode_write_data(struct inode *inode, const void *data, size_t bytes);
int inode_read_data(struct inode *inode, void *data, size_t bytes);
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const struct cpu_str *uni,
umode_t mode, dev_t dev, const char *symname, u32 size,
@ -910,22 +913,6 @@ static inline bool ntfs_is_meta_file(struct ntfs_sb_info *sbi, CLST rno)
rno == sbi->usn_jrnl_no;
}
static inline void ntfs_unmap_page(struct page *page)
{
kunmap(page);
put_page(page);
}
static inline struct page *ntfs_map_page(struct address_space *mapping,
unsigned long index)
{
struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page))
kmap(page);
return page;
}
static inline size_t wnd_zone_bit(const struct wnd_bitmap *wnd)
{
return wnd->zone_bit;
@ -1156,6 +1143,13 @@ static inline void le64_sub_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) - val);
}
#if IS_ENABLED(CONFIG_NTFS_FS)
bool is_legacy_ntfs(struct super_block *sb);
#else
static inline bool is_legacy_ntfs(struct super_block *sb)
{
return false;
}
#endif
#endif /* _LINUX_NTFS3_NTFS_FS_H */


@ -275,7 +275,7 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag_no("acl", Opt_acl),
fsparam_string("iocharset", Opt_iocharset),
fsparam_flag_no("prealloc", Opt_prealloc),
fsparam_flag_no("nocase", Opt_nocase),
fsparam_flag_no("case", Opt_nocase),
{}
};
// clang-format on
@ -464,7 +464,7 @@ static int ntfs3_volinfo(struct seq_file *m, void *o)
struct super_block *sb = m->private;
struct ntfs_sb_info *sbi = sb->s_fs_info;
seq_printf(m, "ntfs%d.%d\n%u\n%zu\n\%zu\n%zu\n%s\n%s\n",
seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n",
sbi->volume.major_ver, sbi->volume.minor_ver,
sbi->cluster_size, sbi->used.bitmap.nbits,
sbi->mft.bitmap.nbits,
@ -1159,7 +1159,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
CLST vcn, lcn, len;
struct ATTRIB *attr;
const struct VOLUME_INFO *info;
u32 idx, done, bytes;
u32 done, bytes;
struct ATTR_DEF_ENTRY *t;
u16 *shared;
struct MFT_REF ref;
@ -1201,7 +1201,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* Load $Volume. This should be done before $LogFile
* 'cause 'sbi->volume.ni' is used 'ntfs_set_state'.
* 'cause 'sbi->volume.ni' is used in 'ntfs_set_state'.
*/
ref.low = cpu_to_le32(MFT_REC_VOL);
ref.seq = cpu_to_le16(MFT_REC_VOL);
@ -1431,31 +1431,22 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto put_inode_out;
}
for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
unsigned long tail = bytes - done;
struct page *page = ntfs_map_page(inode->i_mapping, idx);
/* Read the entire file. */
err = inode_read_data(inode, sbi->def_table, bytes);
if (err) {
ntfs_err(sb, "Failed to read $AttrDef (%d).", err);
goto put_inode_out;
}
if (IS_ERR(page)) {
err = PTR_ERR(page);
ntfs_err(sb, "Failed to read $AttrDef (%d).", err);
goto put_inode_out;
}
memcpy(Add2Ptr(t, done), page_address(page),
min(PAGE_SIZE, tail));
ntfs_unmap_page(page);
if (!idx && ATTR_STD != t->type) {
ntfs_err(sb, "$AttrDef is corrupted.");
err = -EINVAL;
goto put_inode_out;
}
if (ATTR_STD != t->type) {
ntfs_err(sb, "$AttrDef is corrupted.");
err = -EINVAL;
goto put_inode_out;
}
t += 1;
sbi->def_entries = 1;
done = sizeof(struct ATTR_DEF_ENTRY);
sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
sbi->ea_max_size = 0x10000; /* default formatter value */
while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
u32 t32 = le32_to_cpu(t->type);
@ -1491,27 +1482,22 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto put_inode_out;
}
for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
const __le16 *src;
u16 *dst = Add2Ptr(sbi->upcase, idx << PAGE_SHIFT);
struct page *page = ntfs_map_page(inode->i_mapping, idx);
if (IS_ERR(page)) {
err = PTR_ERR(page);
ntfs_err(sb, "Failed to read $UpCase (%d).", err);
goto put_inode_out;
}
src = page_address(page);
/* Read the entire file. */
err = inode_read_data(inode, sbi->upcase, 0x10000 * sizeof(short));
if (err) {
ntfs_err(sb, "Failed to read $UpCase (%d).", err);
goto put_inode_out;
}
#ifdef __BIG_ENDIAN
for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
{
const __le16 *src = sbi->upcase;
u16 *dst = sbi->upcase;
for (i = 0; i < 0x10000; i++)
*dst++ = le16_to_cpu(*src++);
#else
memcpy(dst, src, PAGE_SIZE);
#endif
ntfs_unmap_page(page);
}
#endif
shared = ntfs_set_shared(sbi->upcase, 0x10000 * sizeof(short));
if (shared && sbi->upcase != shared) {
@ -1847,10 +1833,8 @@ bool is_legacy_ntfs(struct super_block *sb)
#else
static inline void register_as_ntfs_legacy(void) {}
static inline void unregister_as_ntfs_legacy(void) {}
bool is_legacy_ntfs(struct super_block *sb) { return false; }
#endif
// clang-format on
static int __init init_ntfs_fs(void)
@ -1876,8 +1860,7 @@ static int __init init_ntfs_fs(void)
ntfs_inode_cachep = kmem_cache_create(
"ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
(SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT),
init_once);
(SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT), init_once);
if (!ntfs_inode_cachep) {
err = -ENOMEM;
goto out1;
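
With the parameter table above switched to fsparam_flag_no("case", ...),
the accepted spellings of the option are "case" and "nocase". A minimal
mount(2) sketch passing it, with an assumed device and mount point
(needs CAP_SYS_ADMIN):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /dev/sdb1 and /mnt/ntfs are examples only. */
	if (mount("/dev/sdb1", "/mnt/ntfs", "ntfs3", 0, "nocase")) {
		perror("mount");
		return 1;
	}
	return 0;
}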


@ -195,10 +195,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
{
const struct EA_INFO *info;
struct EA_FULL *ea_all = NULL;
const struct EA_FULL *ea;
u32 off, size;
int err;
int ea_size;
size_t ret;
err = ntfs_read_ea(ni, &ea_all, 0, &info);
@ -212,16 +210,18 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
/* Enumerate all xattrs. */
ret = 0;
for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
ea = Add2Ptr(ea_all, off);
ea_size = unpacked_ea_size(ea);
off = 0;
while (off + sizeof(struct EA_FULL) < size) {
const struct EA_FULL *ea = Add2Ptr(ea_all, off);
int ea_size = unpacked_ea_size(ea);
u8 name_len = ea->name_len;
if (!ea->name_len)
if (!name_len)
break;
if (ea->name_len > ea_size) {
if (name_len > ea_size) {
ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
err = -EINVAL; /* corrupted fs */
err = -EINVAL; /* corrupted fs. */
break;
}
@ -230,16 +230,17 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
if (off + ea_size > size)
break;
if (ret + ea->name_len + 1 > bytes_per_buffer) {
if (ret + name_len + 1 > bytes_per_buffer) {
err = -ERANGE;
goto out;
}
memcpy(buffer + ret, ea->name, ea->name_len);
buffer[ret + ea->name_len] = 0;
memcpy(buffer + ret, ea->name, name_len);
buffer[ret + name_len] = 0;
}
ret += ea->name_len + 1;
ret += name_len + 1;
off += ea_size;
}
out: