mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-29 17:25:38 +00:00)
vfs-6.12.folio
Merge tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs folio updates from Christian Brauner:
 "This contains work to port write_begin and write_end to rely on
  folios for various filesystems.

  This converts ocfs2, vboxfs, orangefs, jffs2, hostfs, fuse, f2fs,
  ecryptfs, ntfs3, nilfs2, reiserfs, minixfs, qnx6, sysv, ufs, and
  squashfs.

  After this series lands, a bunch of the filesystems in this list do
  not mention struct page anymore."

* tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs: (61 commits)
  Squashfs: Ensure all readahead pages have been used
  Squashfs: Rewrite and update squashfs_readahead_fragment() to not use page->index
  Squashfs: Update squashfs_readpage_block() to not use page->index
  Squashfs: Update squashfs_readahead() to not use page->index
  Squashfs: Update page_actor to not use page->index
  jffs2: Use a folio in jffs2_garbage_collect_dnode()
  jffs2: Convert jffs2_do_readpage_nolock to take a folio
  buffer: Convert __block_write_begin() to take a folio
  ocfs2: Convert ocfs2_write_zero_page to use a folio
  fs: Convert aops->write_begin to take a folio
  fs: Convert aops->write_end to take a folio
  vboxsf: Use a folio in vboxsf_write_end()
  orangefs: Convert orangefs_write_begin() to use a folio
  orangefs: Convert orangefs_write_end() to use a folio
  jffs2: Convert jffs2_write_begin() to use a folio
  jffs2: Convert jffs2_write_end() to use a folio
  hostfs: Convert hostfs_write_end() to use a folio
  fuse: Convert fuse_write_begin() to use a folio
  fuse: Convert fuse_write_end() to use a folio
  f2fs: Convert f2fs_write_begin() to use a folio
  ...
This commit is contained in: commit 2775df6e5e
@@ -251,10 +251,10 @@ prototypes::
     void (*readahead)(struct readahead_control *);
     int (*write_begin)(struct file *, struct address_space *mapping,
                 loff_t pos, unsigned len,
-                struct page **pagep, void **fsdata);
+                struct folio **foliop, void **fsdata);
     int (*write_end)(struct file *, struct address_space *mapping,
                 loff_t pos, unsigned len, unsigned copied,
-                struct page *page, void *fsdata);
+                struct folio *folio, void *fsdata);
     sector_t (*bmap)(struct address_space *, sector_t);
     void (*invalidate_folio) (struct folio *, size_t start, size_t len);
     bool (*release_folio)(struct folio *, gfp_t);
@@ -280,7 +280,7 @@ read_folio:        yes, unlocks        shared
 writepages:
 dirty_folio:       maybe
 readahead:         yes, unlocks        shared
-write_begin:       locks the page      exclusive
+write_begin:       locks the folio     exclusive
 write_end:         yes, unlocks        exclusive
 bmap:
 invalidate_folio:  yes                 exclusive
@@ -810,7 +810,7 @@ cache in your filesystem. The following members are defined:
-            struct page **pagep, void **fsdata);
+            struct folio **foliop, void **fsdata);
     int (*write_end)(struct file *, struct address_space *mapping,
             loff_t pos, unsigned len, unsigned copied,
-            struct page *page, void *fsdata);
+            struct folio *folio, void *fsdata);
     sector_t (*bmap)(struct address_space *, sector_t);
     void (*invalidate_folio) (struct folio *, size_t start, size_t len);
     bool (*release_folio)(struct folio *, gfp_t);
@@ -926,12 +926,12 @@ cache in your filesystem. The following members are defined:
     (if they haven't been read already) so that the updated blocks
     can be written out properly.

-    The filesystem must return the locked pagecache page for the
-    specified offset, in ``*pagep``, for the caller to write into.
+    The filesystem must return the locked pagecache folio for the
+    specified offset, in ``*foliop``, for the caller to write into.

     It must be able to cope with short writes (where the length
     passed to write_begin is greater than the number of bytes copied
-    into the page).
+    into the folio).

     A void * may be returned in fsdata, which then gets passed into
     write_end.
@@ -944,8 +944,8 @@ cache in your filesystem. The following members are defined:
     called. len is the original len passed to write_begin, and
     copied is the amount that was able to be copied.

-    The filesystem must take care of unlocking the page and
-    releasing it refcount, and updating i_size.
+    The filesystem must take care of unlocking the folio,
+    decrementing its refcount, and updating i_size.

     Returns < 0 on failure, otherwise the number of bytes (<=
     'copied') that were able to be copied into pagecache.
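The documentation above spells out the new contract: ->write_begin hands back a locked, referenced pagecache folio in *foliop, and ->write_end unlocks it, drops the reference, and updates i_size. As a reader aid, here is a minimal sketch of a converted pair for a hypothetical block-based filesystem; myfs_get_block is an assumed get_block_t callback, and this code is illustrative, not part of the merge:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical block-mapping callback, not defined here. */
static int myfs_get_block(struct inode *inode, sector_t block,
                          struct buffer_head *bh, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len,
                            struct folio **foliop, void **fsdata)
{
    /* On success, *foliop holds the locked folio covering pos. */
    return block_write_begin(mapping, pos, len, foliop, myfs_get_block);
}

static int myfs_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct folio *folio, void *fsdata)
{
    /*
     * generic_write_end() unlocks the folio, drops the reference
     * taken by ->write_begin, and updates i_size under the lock.
     */
    return generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
}

static const struct address_space_operations myfs_aops = {
    .write_begin    = myfs_write_begin,
    .write_end      = myfs_write_end,
    /* read_folio, writepages, etc. omitted for brevity */
};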
block/fops.c (12 lines changed)

@@ -451,20 +451,20 @@ static void blkdev_readahead(struct readahead_control *rac)
 }

 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
-        loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+        loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
 {
-    return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
+    return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
 }

 static int blkdev_write_end(struct file *file, struct address_space *mapping,
-        loff_t pos, unsigned len, unsigned copied, struct page *page,
+        loff_t pos, unsigned len, unsigned copied, struct folio *folio,
         void *fsdata)
 {
     int ret;
-    ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+    ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

-    unlock_page(page);
-    put_page(page);
+    folio_unlock(folio);
+    folio_put(folio);

     return ret;
 }
@@ -424,7 +424,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
     struct address_space *mapping = obj->base.filp->f_mapping;
     const struct address_space_operations *aops = mapping->a_ops;
     char __user *user_data = u64_to_user_ptr(arg->data_ptr);
-    u64 remain, offset;
+    u64 remain;
+    loff_t pos;
     unsigned int pg;

     /* Caller already validated user args */
@@ -457,12 +458,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
      */

     remain = arg->size;
-    offset = arg->offset;
-    pg = offset_in_page(offset);
+    pos = arg->offset;
+    pg = offset_in_page(pos);

     do {
         unsigned int len, unwritten;
-        struct page *page;
+        struct folio *folio;
         void *data, *vaddr;
         int err;
         char __maybe_unused c;
@@ -480,21 +481,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
         if (err)
             return err;

-        err = aops->write_begin(obj->base.filp, mapping, offset, len,
-                    &page, &data);
+        err = aops->write_begin(obj->base.filp, mapping, pos, len,
+                    &folio, &data);
         if (err < 0)
             return err;

-        vaddr = kmap_local_page(page);
+        vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
         pagefault_disable();
-        unwritten = __copy_from_user_inatomic(vaddr + pg,
-                              user_data,
-                              len);
+        unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
         pagefault_enable();
         kunmap_local(vaddr);

-        err = aops->write_end(obj->base.filp, mapping, offset, len,
-                      len - unwritten, page, data);
+        err = aops->write_end(obj->base.filp, mapping, pos, len,
+                      len - unwritten, folio, data);
         if (err < 0)
             return err;

@@ -504,7 +503,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,

         remain -= len;
         user_data += len;
-        offset += len;
+        pos += len;
         pg = 0;
     } while (remain);

@@ -660,7 +659,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
     struct drm_i915_gem_object *obj;
     struct file *file;
     const struct address_space_operations *aops;
-    resource_size_t offset;
+    loff_t pos;
     int err;

     GEM_WARN_ON(IS_DGFX(i915));
@@ -672,29 +671,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,

     file = obj->base.filp;
     aops = file->f_mapping->a_ops;
-    offset = 0;
+    pos = 0;
     do {
         unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
-        struct page *page;
-        void *pgdata, *vaddr;
+        struct folio *folio;
+        void *fsdata;

-        err = aops->write_begin(file, file->f_mapping, offset, len,
-                    &page, &pgdata);
+        err = aops->write_begin(file, file->f_mapping, pos, len,
+                    &folio, &fsdata);
         if (err < 0)
             goto fail;

-        vaddr = kmap(page);
-        memcpy(vaddr, data, len);
-        kunmap(page);
+        memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);

-        err = aops->write_end(file, file->f_mapping, offset, len, len,
-                      page, pgdata);
+        err = aops->write_end(file, file->f_mapping, pos, len, len,
+                      folio, fsdata);
         if (err < 0)
             goto fail;

         size -= len;
         data += len;
-        offset += len;
+        pos += len;
     } while (size);

     return obj;
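The kmap change in shmem_pwrite() is worth calling out: kmap_local_folio() takes the byte offset to map, and the pointer it returns is already positioned at that offset, which is why the `vaddr + pg` arithmetic disappears. A standalone sketch of the idiom, assuming the folio is locked and the copy does not cross a page boundary (illustration, not code from this commit):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

static unsigned int copy_into_folio(struct folio *folio, loff_t pos,
                                    const char __user *src, unsigned int len)
{
    size_t offset = offset_in_folio(folio, pos);
    /* Maps the page containing pos; the pointer already points at pos. */
    void *vaddr = kmap_local_folio(folio, offset);
    unsigned int unwritten;

    pagefault_disable();
    /* Returns the number of bytes that could NOT be copied. */
    unwritten = __copy_from_user_inatomic(vaddr, src, len);
    pagefault_enable();
    kunmap_local(vaddr);

    return unwritten;
}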
@@ -55,12 +55,11 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)

 static int adfs_write_begin(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len,
-        struct page **pagep, void **fsdata)
+        struct folio **foliop, void **fsdata)
 {
     int ret;

-    *pagep = NULL;
-    ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+    ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
                    adfs_get_block,
                    &ADFS_I(mapping->host)->mmu_private);
     if (unlikely(ret))
@@ -417,12 +417,11 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)

 static int affs_write_begin(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len,
-        struct page **pagep, void **fsdata)
+        struct folio **foliop, void **fsdata)
 {
     int ret;

-    *pagep = NULL;
-    ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+    ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
                    affs_get_block,
                    &AFFS_I(mapping->host)->mmu_private);
     if (unlikely(ret))
@@ -433,12 +432,12 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,

 static int affs_write_end(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned int len, unsigned int copied,
-        struct page *page, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
     struct inode *inode = mapping->host;
     int ret;

-    ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+    ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);

     /* Clear Archived bit on file writes, as AmigaOS would do */
     if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
@@ -648,7 +647,7 @@ static int affs_read_folio_ofs(struct file *file, struct folio *folio)

 static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len,
-        struct page **pagep, void **fsdata)
+        struct folio **foliop, void **fsdata)
 {
     struct inode *inode = mapping->host;
     struct folio *folio;
@@ -671,7 +670,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
             mapping_gfp_mask(mapping));
     if (IS_ERR(folio))
         return PTR_ERR(folio);
-    *pagep = &folio->page;
+    *foliop = folio;

     if (folio_test_uptodate(folio))
         return 0;
@@ -687,9 +686,8 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping

 static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len, unsigned copied,
-        struct page *page, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
-    struct folio *folio = page_folio(page);
     struct inode *inode = mapping->host;
     struct super_block *sb = inode->i_sb;
     struct buffer_head *bh, *prev_bh;
@@ -882,14 +880,14 @@ affs_truncate(struct inode *inode)

     if (inode->i_size > AFFS_I(inode)->mmu_private) {
         struct address_space *mapping = inode->i_mapping;
-        struct page *page;
+        struct folio *folio;
         void *fsdata = NULL;
         loff_t isize = inode->i_size;
         int res;

-        res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
+        res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
         if (!res)
-            res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
+            res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
         else
             inode->i_size = AFFS_I(inode)->mmu_private;
         mark_inode_dirty(inode);
@@ -659,7 +659,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc

 int bch2_write_begin(struct file *file, struct address_space *mapping,
              loff_t pos, unsigned len,
-             struct page **pagep, void **fsdata)
+             struct folio **foliop, void **fsdata)
 {
     struct bch_inode_info *inode = to_bch_ei(mapping->host);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -728,12 +728,11 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
         goto err;
     }

-    *pagep = &folio->page;
+    *foliop = folio;
     return 0;
 err:
     folio_unlock(folio);
     folio_put(folio);
-    *pagep = NULL;
 err_unlock:
     bch2_pagecache_add_put(inode);
     kfree(res);
@@ -743,12 +742,11 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,

 int bch2_write_end(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
-           struct page *page, void *fsdata)
+           struct folio *folio, void *fsdata)
 {
     struct bch_inode_info *inode = to_bch_ei(mapping->host);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
     struct bch2_folio_reservation *res = fsdata;
-    struct folio *folio = page_folio(page);
     unsigned offset = pos - folio_pos(folio);

     lockdep_assert_held(&inode->v.i_rwsem);

@@ -10,10 +10,10 @@ int bch2_read_folio(struct file *, struct folio *);
 int bch2_writepages(struct address_space *, struct writeback_control *);
 void bch2_readahead(struct readahead_control *);

-int bch2_write_begin(struct file *, struct address_space *, loff_t,
-             unsigned, struct page **, void **);
+int bch2_write_begin(struct file *, struct address_space *, loff_t pos,
+             unsigned len, struct folio **, void **);
 int bch2_write_end(struct file *, struct address_space *, loff_t,
-           unsigned, unsigned, struct page *, void *);
+           unsigned len, unsigned copied, struct folio *, void *);

 ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
@@ -172,11 +172,11 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)

 static int bfs_write_begin(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len,
-        struct page **pagep, void **fsdata)
+        struct folio **foliop, void **fsdata)
 {
     int ret;

-    ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block);
+    ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block);
     if (unlikely(ret))
         bfs_write_failed(mapping, pos + len);

fs/buffer.c (63 lines changed)

@@ -2164,11 +2164,10 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
     return err;
 }

-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
         get_block_t *get_block)
 {
-    return __block_write_begin_int(page_folio(page), pos, len, get_block,
-                       NULL);
+    return __block_write_begin_int(folio, pos, len, get_block, NULL);
 }
 EXPORT_SYMBOL(__block_write_begin);

@@ -2218,33 +2217,33 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
  * The filesystem needs to handle block truncation upon failure.
  */
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
-        struct page **pagep, get_block_t *get_block)
+        struct folio **foliop, get_block_t *get_block)
 {
     pgoff_t index = pos >> PAGE_SHIFT;
-    struct page *page;
+    struct folio *folio;
     int status;

-    page = grab_cache_page_write_begin(mapping, index);
-    if (!page)
-        return -ENOMEM;
+    folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+            mapping_gfp_mask(mapping));
+    if (IS_ERR(folio))
+        return PTR_ERR(folio);

-    status = __block_write_begin(page, pos, len, get_block);
+    status = __block_write_begin_int(folio, pos, len, get_block, NULL);
     if (unlikely(status)) {
-        unlock_page(page);
-        put_page(page);
-        page = NULL;
+        folio_unlock(folio);
+        folio_put(folio);
+        folio = NULL;
     }

-    *pagep = page;
+    *foliop = folio;
     return status;
 }
 EXPORT_SYMBOL(block_write_begin);

 int block_write_end(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len, unsigned copied,
-        struct page *page, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
-    struct folio *folio = page_folio(page);
     size_t start = pos - folio_pos(folio);

     if (unlikely(copied < len)) {
@@ -2276,19 +2275,19 @@ EXPORT_SYMBOL(block_write_end);

 int generic_write_end(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len, unsigned copied,
-        struct page *page, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
     struct inode *inode = mapping->host;
     loff_t old_size = inode->i_size;
     bool i_size_changed = false;

-    copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+    copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

     /*
      * No need to use i_size_read() here, the i_size cannot change under us
      * because we hold i_rwsem.
      *
-     * But it's important to update i_size while still holding page lock:
+     * But it's important to update i_size while still holding folio lock:
      * page writeout could otherwise come in and zero beyond i_size.
      */
     if (pos + copied > inode->i_size) {
@@ -2296,8 +2295,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
         i_size_changed = true;
     }

-    unlock_page(page);
-    put_page(page);
+    folio_unlock(folio);
+    folio_put(folio);

     if (old_size < pos)
         pagecache_isize_extended(inode, old_size, pos);
@@ -2463,7 +2462,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 {
     struct address_space *mapping = inode->i_mapping;
     const struct address_space_operations *aops = mapping->a_ops;
-    struct page *page;
+    struct folio *folio;
     void *fsdata = NULL;
     int err;

@@ -2471,11 +2470,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
     if (err)
         goto out;

-    err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
+    err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
     if (err)
         goto out;

-    err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
+    err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
     BUG_ON(err > 0);

 out:
@@ -2489,7 +2488,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
     struct inode *inode = mapping->host;
     const struct address_space_operations *aops = mapping->a_ops;
     unsigned int blocksize = i_blocksize(inode);
-    struct page *page;
+    struct folio *folio;
     void *fsdata = NULL;
     pgoff_t index, curidx;
     loff_t curpos;
@@ -2508,12 +2507,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
         len = PAGE_SIZE - zerofrom;

         err = aops->write_begin(file, mapping, curpos, len,
-                    &page, &fsdata);
+                    &folio, &fsdata);
         if (err)
             goto out;
-        zero_user(page, zerofrom, len);
+        folio_zero_range(folio, offset_in_folio(folio, curpos), len);
         err = aops->write_end(file, mapping, curpos, len, len,
-                      page, fsdata);
+                      folio, fsdata);
         if (err < 0)
             goto out;
         BUG_ON(err != len);
@@ -2541,12 +2540,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
         len = offset - zerofrom;

         err = aops->write_begin(file, mapping, curpos, len,
-                    &page, &fsdata);
+                    &folio, &fsdata);
         if (err)
             goto out;
-        zero_user(page, zerofrom, len);
+        folio_zero_range(folio, offset_in_folio(folio, curpos), len);
         err = aops->write_end(file, mapping, curpos, len, len,
-                      page, fsdata);
+                      folio, fsdata);
         if (err < 0)
             goto out;
         BUG_ON(err != len);
@@ -2562,7 +2561,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
  */
 int cont_write_begin(struct file *file, struct address_space *mapping,
             loff_t pos, unsigned len,
-            struct page **pagep, void **fsdata,
+            struct folio **foliop, void **fsdata,
             get_block_t *get_block, loff_t *bytes)
 {
     struct inode *inode = mapping->host;
@@ -2580,7 +2579,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
         (*bytes)++;
     }

-    return block_write_begin(mapping, pos, len, pagep, get_block);
+    return block_write_begin(mapping, pos, len, foliop, get_block);
 }
 EXPORT_SYMBOL(cont_write_begin);
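Several conversions in this series, block_write_begin() above included, replace grab_cache_page_write_begin() with __filemap_get_folio() plus FGP_WRITEBEGIN (which combines FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE). The error convention changes with it: the old helper returned NULL on failure, the new one returns an ERR_PTR(). A sketch of the lookup step on its own (illustrative, not code from this commit):

#include <linux/pagemap.h>

static struct folio *get_write_folio(struct address_space *mapping, loff_t pos)
{
    pgoff_t index = pos >> PAGE_SHIFT;
    struct folio *folio;

    /* Returns a locked folio with an elevated refcount, or ERR_PTR(). */
    folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
                                mapping_gfp_mask(mapping));
    if (IS_ERR(folio))
        return folio;   /* e.g. ERR_PTR(-ENOMEM) */

    /* Caller must folio_unlock() and folio_put() when done. */
    return folio;
}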
@@ -1508,20 +1508,18 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
  */
 static int ceph_write_begin(struct file *file, struct address_space *mapping,
                 loff_t pos, unsigned len,
-                struct page **pagep, void **fsdata)
+                struct folio **foliop, void **fsdata)
 {
     struct inode *inode = file_inode(file);
     struct ceph_inode_info *ci = ceph_inode(inode);
-    struct folio *folio = NULL;
     int r;

-    r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
+    r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
     if (r < 0)
         return r;

-    folio_wait_private_2(folio); /* [DEPRECATED] */
-    WARN_ON_ONCE(!folio_test_locked(folio));
-    *pagep = &folio->page;
+    folio_wait_private_2(*foliop); /* [DEPRECATED] */
+    WARN_ON_ONCE(!folio_test_locked(*foliop));
     return 0;
 }

@@ -1531,9 +1529,8 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
  */
 static int ceph_write_end(struct file *file, struct address_space *mapping,
               loff_t pos, unsigned len, unsigned copied,
-              struct page *subpage, void *fsdata)
+              struct folio *folio, void *fsdata)
 {
-    struct folio *folio = page_folio(subpage);
     struct inode *inode = file_inode(file);
     struct ceph_client *cl = ceph_inode_to_client(inode);
     bool check_cap = false;
@@ -234,17 +234,17 @@ static int ecryptfs_read_folio(struct file *file, struct folio *folio)
 /*
  * Called with lower inode mutex held.
  */
-static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
+static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to)
 {
-    struct inode *inode = page->mapping->host;
+    struct inode *inode = folio->mapping->host;
     int end_byte_in_page;

-    if ((i_size_read(inode) / PAGE_SIZE) != page->index)
+    if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
         goto out;
     end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
     if (to > end_byte_in_page)
         end_byte_in_page = to;
-    zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
+    folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
 out:
     return 0;
 }
@@ -255,7 +255,7 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
  * @mapping: The eCryptfs object
  * @pos: The file offset at which to start writing
  * @len: Length of the write
- * @pagep: Pointer to return the page
+ * @foliop: Pointer to return the folio
  * @fsdata: Pointer to return fs data (unused)
  *
  * This function must zero any hole we create
@@ -265,38 +265,39 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 static int ecryptfs_write_begin(struct file *file,
             struct address_space *mapping,
             loff_t pos, unsigned len,
-            struct page **pagep, void **fsdata)
+            struct folio **foliop, void **fsdata)
 {
     pgoff_t index = pos >> PAGE_SHIFT;
-    struct page *page;
+    struct folio *folio;
     loff_t prev_page_end_size;
     int rc = 0;

-    page = grab_cache_page_write_begin(mapping, index);
-    if (!page)
-        return -ENOMEM;
-    *pagep = page;
+    folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+            mapping_gfp_mask(mapping));
+    if (IS_ERR(folio))
+        return PTR_ERR(folio);
+    *foliop = folio;

     prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
-    if (!PageUptodate(page)) {
+    if (!folio_test_uptodate(folio)) {
         struct ecryptfs_crypt_stat *crypt_stat =
             &ecryptfs_inode_to_private(mapping->host)->crypt_stat;

         if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
             rc = ecryptfs_read_lower_page_segment(
-                page, index, 0, PAGE_SIZE, mapping->host);
+                &folio->page, index, 0, PAGE_SIZE, mapping->host);
             if (rc) {
                 printk(KERN_ERR "%s: Error attempting to read "
                        "lower page segment; rc = [%d]\n",
                        __func__, rc);
-                ClearPageUptodate(page);
+                folio_clear_uptodate(folio);
                 goto out;
             } else
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
         } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
             if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
                 rc = ecryptfs_copy_up_encrypted_with_header(
-                    page, crypt_stat);
+                    &folio->page, crypt_stat);
                 if (rc) {
                     printk(KERN_ERR "%s: Error attempting "
                            "to copy the encrypted content "
@@ -304,46 +305,46 @@ static int ecryptfs_write_begin(struct file *file,
                            "inserting the metadata from "
                            "the xattr into the header; rc "
                            "= [%d]\n", __func__, rc);
-                    ClearPageUptodate(page);
+                    folio_clear_uptodate(folio);
                     goto out;
                 }
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
             } else {
                 rc = ecryptfs_read_lower_page_segment(
-                    page, index, 0, PAGE_SIZE,
+                    &folio->page, index, 0, PAGE_SIZE,
                     mapping->host);
                 if (rc) {
                     printk(KERN_ERR "%s: Error reading "
                            "page; rc = [%d]\n",
                            __func__, rc);
-                    ClearPageUptodate(page);
+                    folio_clear_uptodate(folio);
                     goto out;
                 }
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
             }
         } else {
             if (prev_page_end_size
-                >= i_size_read(page->mapping->host)) {
-                zero_user(page, 0, PAGE_SIZE);
-                SetPageUptodate(page);
+                >= i_size_read(mapping->host)) {
+                folio_zero_range(folio, 0, PAGE_SIZE);
+                folio_mark_uptodate(folio);
             } else if (len < PAGE_SIZE) {
-                rc = ecryptfs_decrypt_page(page);
+                rc = ecryptfs_decrypt_page(&folio->page);
                 if (rc) {
                     printk(KERN_ERR "%s: Error decrypting "
                            "page at index [%ld]; "
                            "rc = [%d]\n",
-                           __func__, page->index, rc);
-                    ClearPageUptodate(page);
+                           __func__, folio->index, rc);
+                    folio_clear_uptodate(folio);
                     goto out;
                 }
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
             }
         }
     }
     /* If creating a page or more of holes, zero them out via truncate.
      * Note, this will increase i_size. */
     if (index != 0) {
-        if (prev_page_end_size > i_size_read(page->mapping->host)) {
+        if (prev_page_end_size > i_size_read(mapping->host)) {
             rc = ecryptfs_truncate(file->f_path.dentry,
                            prev_page_end_size);
             if (rc) {
@@ -359,12 +360,11 @@ static int ecryptfs_write_begin(struct file *file,
      * of page? Zero it out. */
     if ((i_size_read(mapping->host) == prev_page_end_size)
         && (pos != 0))
-        zero_user(page, 0, PAGE_SIZE);
+        folio_zero_range(folio, 0, PAGE_SIZE);
 out:
     if (unlikely(rc)) {
-        unlock_page(page);
-        put_page(page);
-        *pagep = NULL;
+        folio_unlock(folio);
+        folio_put(folio);
     }
     return rc;
 }
@@ -457,13 +457,13 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
  * @pos: The file position
  * @len: The length of the data (unused)
  * @copied: The amount of data copied
- * @page: The eCryptfs page
+ * @folio: The eCryptfs folio
  * @fsdata: The fsdata (unused)
  */
 static int ecryptfs_write_end(struct file *file,
             struct address_space *mapping,
             loff_t pos, unsigned len, unsigned copied,
-            struct page *page, void *fsdata)
+            struct folio *folio, void *fsdata)
 {
     pgoff_t index = pos >> PAGE_SHIFT;
     unsigned from = pos & (PAGE_SIZE - 1);
@@ -476,8 +476,8 @@ static int ecryptfs_write_end(struct file *file,
     ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
             "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
     if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
-        rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
-                               to);
+        rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
+                               &folio->page, 0, to);
         if (!rc) {
             rc = copied;
             fsstack_copy_inode_size(ecryptfs_inode,
@@ -485,21 +485,21 @@ static int ecryptfs_write_end(struct file *file,
         }
         goto out;
     }
-    if (!PageUptodate(page)) {
+    if (!folio_test_uptodate(folio)) {
         if (copied < PAGE_SIZE) {
             rc = 0;
             goto out;
         }
-        SetPageUptodate(page);
+        folio_mark_uptodate(folio);
     }
     /* Fills in zeros if 'to' goes beyond inode size */
-    rc = fill_zeros_to_end_of_page(page, to);
+    rc = fill_zeros_to_end_of_page(folio, to);
     if (rc) {
         ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
             "zeros in page with index = [0x%.16lx]\n", index);
         goto out;
     }
-    rc = ecryptfs_encrypt_page(page);
+    rc = ecryptfs_encrypt_page(&folio->page);
     if (rc) {
         ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
                 "index [0x%.16lx])\n", index);
@@ -518,8 +518,8 @@ static int ecryptfs_write_end(struct file *file,
     else
         rc = copied;
 out:
-    unlock_page(page);
-    put_page(page);
+    folio_unlock(folio);
+    folio_put(folio);
     return rc;
 }
@@ -535,20 +535,20 @@ static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)

     while (start < end) {
         u32 zerofrom, len;
-        struct page *page = NULL;
+        struct folio *folio;

         zerofrom = start & (PAGE_SIZE - 1);
         len = PAGE_SIZE - zerofrom;
         if (start + len > end)
             len = end - start;

-        err = ops->write_begin(file, mapping, start, len, &page, NULL);
+        err = ops->write_begin(file, mapping, start, len, &folio, NULL);
         if (err)
             goto out;

-        zero_user_segment(page, zerofrom, zerofrom + len);
+        folio_zero_range(folio, offset_in_folio(folio, start), len);

-        err = ops->write_end(file, mapping, start, len, len, page, NULL);
+        err = ops->write_end(file, mapping, start, len, len, folio, NULL);
         if (err < 0)
             goto out;
         start += len;

@@ -448,12 +448,11 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)

 static int exfat_write_begin(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned int len,
-        struct page **pagep, void **fsdata)
+        struct folio **foliop, void **fsdata)
 {
     int ret;

-    *pagep = NULL;
-    ret = block_write_begin(mapping, pos, len, pagep, exfat_get_block);
+    ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);

     if (ret < 0)
         exfat_write_failed(mapping, pos+len);
@@ -463,13 +462,13 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,

 static int exfat_write_end(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned int len, unsigned int copied,
-        struct page *pagep, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
     struct inode *inode = mapping->host;
     struct exfat_inode_info *ei = EXFAT_I(inode);
     int err;

-    err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+    err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);

     if (ei->i_size_aligned < i_size_read(inode)) {
         exfat_fs_error(inode->i_sb,
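Note the zeroing call in exfat_file_zeroed_range() above: folio_zero_range(folio, offset_in_folio(folio, start), len). offset_in_folio() converts an absolute file position into a byte offset inside the folio, so the same call stays correct for folios larger than one page. A tiny sketch of that conversion in isolation (illustration only, not code from this commit):

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Zeroes len bytes of pagecache starting at file position pos.
 * Assumes the (locked) folio covers pos. */
static void zero_folio_at(struct folio *folio, loff_t pos, size_t len)
{
    size_t offset = offset_in_folio(folio, pos);

    folio_zero_range(folio, offset, len);
}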
@@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
     struct inode *dir = mapping->host;

     inode_inc_iversion(dir);
-    block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+    block_write_end(NULL, mapping, pos, len, len, folio, NULL);

     if (pos+len > dir->i_size) {
         i_size_write(dir, pos+len);
@@ -434,7 +434,7 @@ int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)

 static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-    return __block_write_begin(&folio->page, pos, len, ext2_get_block);
+    return __block_write_begin(folio, pos, len, ext2_get_block);
 }

 static int ext2_handle_dirsync(struct inode *dir)

@@ -916,11 +916,11 @@ static void ext2_readahead(struct readahead_control *rac)

 static int
 ext2_write_begin(struct file *file, struct address_space *mapping,
-        loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+        loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
 {
     int ret;

-    ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block);
+    ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block);
     if (ret < 0)
         ext2_write_failed(mapping, pos + len);
     return ret;
@@ -928,11 +928,11 @@ ext2_write_begin(struct file *file, struct address_space *mapping,

 static int ext2_write_end(struct file *file, struct address_space *mapping,
         loff_t pos, unsigned len, unsigned copied,
-        struct page *page, void *fsdata)
+        struct folio *folio, void *fsdata)
 {
     int ret;

-    ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+    ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
     if (ret < len)
         ext2_write_failed(mapping, pos + len);
     return ret;
@@ -3563,13 +3563,13 @@ int ext4_readpage_inline(struct inode *inode, struct folio *folio);
 extern int ext4_try_to_write_inline_data(struct address_space *mapping,
                      struct inode *inode,
                      loff_t pos, unsigned len,
-                     struct page **pagep);
+                     struct folio **foliop);
 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
                    unsigned copied, struct folio *folio);
 extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
                        struct inode *inode,
                        loff_t pos, unsigned len,
-                       struct page **pagep,
+                       struct folio **foliop,
                        void **fsdata);
 extern int ext4_try_add_inline_entry(handle_t *handle,
                      struct ext4_filename *fname,

@@ -601,10 +601,10 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
         goto out;

     if (ext4_should_dioread_nolock(inode)) {
-        ret = __block_write_begin(&folio->page, from, to,
+        ret = __block_write_begin(folio, from, to,
                       ext4_get_block_unwritten);
     } else
-        ret = __block_write_begin(&folio->page, from, to, ext4_get_block);
+        ret = __block_write_begin(folio, from, to, ext4_get_block);

     if (!ret && ext4_should_journal_data(inode)) {
         ret = ext4_walk_page_buffers(handle, inode,
@@ -660,7 +660,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
 int ext4_try_to_write_inline_data(struct address_space *mapping,
                   struct inode *inode,
                   loff_t pos, unsigned len,
-                  struct page **pagep)
+                  struct folio **foliop)
 {
     int ret;
     handle_t *handle;
@@ -708,7 +708,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
         goto out;
     }

-    *pagep = &folio->page;
+    *foliop = folio;
     down_read(&EXT4_I(inode)->xattr_sem);
     if (!ext4_has_inline_data(inode)) {
         ret = 0;
@@ -856,7 +856,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
         goto out;
     }

-    ret = __block_write_begin(&folio->page, 0, inline_size,
+    ret = __block_write_begin(folio, 0, inline_size,
                   ext4_da_get_block_prep);
     if (ret) {
         up_read(&EXT4_I(inode)->xattr_sem);
@@ -891,7 +891,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
 int ext4_da_write_inline_data_begin(struct address_space *mapping,
                     struct inode *inode,
                     loff_t pos, unsigned len,
-                    struct page **pagep,
+                    struct folio **foliop,
                     void **fsdata)
 {
     int ret;
@@ -954,7 +954,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
         goto out_release_page;

     up_read(&EXT4_I(inode)->xattr_sem);
-    *pagep = &folio->page;
+    *foliop = folio;
     brelse(iloc.bh);
     return 1;
 out_release_page:
@@ -1145,7 +1145,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
  */
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                 loff_t pos, unsigned len,
-                struct page **pagep, void **fsdata)
+                struct folio **foliop, void **fsdata)
 {
     struct inode *inode = mapping->host;
     int ret, needed_blocks;
@@ -1170,7 +1170,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,

     if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
         ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
-                            pagep);
+                            foliop);
         if (ret < 0)
             return ret;
         if (ret == 1)
@@ -1224,10 +1224,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
     ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
 #else
     if (ext4_should_dioread_nolock(inode))
-        ret = __block_write_begin(&folio->page, pos, len,
+        ret = __block_write_begin(folio, pos, len,
                       ext4_get_block_unwritten);
     else
-        ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
+        ret = __block_write_begin(folio, pos, len, ext4_get_block);
 #endif
     if (!ret && ext4_should_journal_data(inode)) {
         ret = ext4_walk_page_buffers(handle, inode,
@@ -1270,7 +1270,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
         folio_put(folio);
         return ret;
     }
-    *pagep = &folio->page;
+    *foliop = folio;
     return ret;
 }

@@ -1298,9 +1298,8 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
 static int ext4_write_end(struct file *file,
               struct address_space *mapping,
               loff_t pos, unsigned len, unsigned copied,
-              struct page *page, void *fsdata)
+              struct folio *folio, void *fsdata)
 {
-    struct folio *folio = page_folio(page);
     handle_t *handle = ext4_journal_current_handle();
     struct inode *inode = mapping->host;
     loff_t old_size = inode->i_size;
@@ -1315,7 +1314,7 @@ static int ext4_write_end(struct file *file,
         return ext4_write_inline_data_end(inode, pos, len, copied,
                           folio);

-    copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+    copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
     /*
      * it's important to update i_size while still holding folio lock:
      * page writeout could otherwise come in and zero beyond i_size.
@@ -1402,9 +1401,8 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
 static int ext4_journalled_write_end(struct file *file,
                      struct address_space *mapping,
                      loff_t pos, unsigned len, unsigned copied,
-                     struct page *page, void *fsdata)
+                     struct folio *folio, void *fsdata)
 {
-    struct folio *folio = page_folio(page);
     handle_t *handle = ext4_journal_current_handle();
     struct inode *inode = mapping->host;
     loff_t old_size = inode->i_size;
@@ -2926,7 +2924,7 @@ static int ext4_nonda_switch(struct super_block *sb)

 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len,
-                   struct page **pagep, void **fsdata)
+                   struct folio **foliop, void **fsdata)
 {
     int ret, retries = 0;
     struct folio *folio;
@@ -2941,14 +2939,14 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
     if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
         *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
         return ext4_write_begin(file, mapping, pos,
-                    len, pagep, fsdata);
+                    len, foliop, fsdata);
     }
     *fsdata = (void *)0;
     trace_ext4_da_write_begin(inode, pos, len);

     if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
         ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
-                              pagep, fsdata);
+                              foliop, fsdata);
         if (ret < 0)
             return ret;
         if (ret == 1)
@@ -2964,7 +2962,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 #ifdef CONFIG_FS_ENCRYPTION
     ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
 #else
-    ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
+    ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep);
 #endif
     if (ret < 0) {
         folio_unlock(folio);
@@ -2983,7 +2981,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
         return ret;
     }

-    *pagep = &folio->page;
+    *foliop = folio;
     return ret;
 }

@@ -3029,7 +3027,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
      * flag, which all that's needed to trigger page writeback.
      */
     copied = block_write_end(NULL, mapping, pos, len, copied,
-                 &folio->page, NULL);
+                 folio, NULL);
     new_i_size = pos + copied;

     /*
@@ -3080,15 +3078,14 @@ static int ext4_da_do_write_end(struct address_space *mapping,
 static int ext4_da_write_end(struct file *file,
                  struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
-                 struct page *page, void *fsdata)
+                 struct folio *folio, void *fsdata)
 {
     struct inode *inode = mapping->host;
     int write_mode = (int)(unsigned long)fsdata;
-    struct folio *folio = page_folio(page);

     if (write_mode == FALL_BACK_TO_NONDELALLOC)
         return ext4_write_end(file, mapping, pos,
-                      len, copied, &folio->page, fsdata);
+                      len, copied, folio, fsdata);

     trace_ext4_da_write_end(inode, pos, len, copied);

@@ -6219,7 +6216,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
     if (folio_pos(folio) + len > size)
         len = size - folio_pos(folio);

-    err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
+    err = __block_write_begin(folio, 0, len, ext4_get_block);
     if (!err) {
         ret = VM_FAULT_SIGBUS;
         if (ext4_journal_folio_buffers(handle, folio, len))
@@ -76,17 +76,17 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
     while (count) {
         size_t n = min_t(size_t, count,
                  PAGE_SIZE - offset_in_page(pos));
-        struct page *page;
+        struct folio *folio;
         void *fsdata = NULL;
         int res;

-        res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+        res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
         if (res)
             return res;

-        memcpy_to_page(page, offset_in_page(pos), buf, n);
+        memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);

-        res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
+        res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
         if (res < 0)
             return res;
         if (res != n)
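The pagecache_write() hunk above (and its f2fs twin further down) follows a pattern this series makes folio-native: push a kernel buffer into the pagecache one chunk at a time through ->write_begin/->write_end, with memcpy_to_folio() doing the highmem-safe copy. A self-contained sketch of the loop, assuming mapping belongs to the target inode (illustrative, with simplified short-write handling, not the kernel's implementation):

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static int write_buf_to_pagecache(struct address_space *mapping,
                                  const char *buf, size_t count, loff_t pos)
{
    const struct address_space_operations *aops = mapping->a_ops;

    while (count) {
        size_t n = min_t(size_t, count,
                         PAGE_SIZE - offset_in_page(pos));
        struct folio *folio;
        void *fsdata = NULL;
        int res;

        res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
        if (res)
            return res;

        /* Copy into the locked folio; handles highmem kmapping. */
        memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);

        /* Unlocks and releases the folio. */
        res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
        if (res < 0)
            return res;
        if (res != n)
            return -EIO;    /* short write; simplified handling */

        buf += n;
        pos += n;
        count -= n;
    }
    return 0;
}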
@@ -3552,12 +3552,12 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
 }

 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
-        loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+        loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
 {
     struct inode *inode = mapping->host;
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-    struct page *page = NULL;
-    pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+    struct folio *folio;
+    pgoff_t index = pos >> PAGE_SHIFT;
     bool need_balance = false;
     bool use_cow = false;
     block_t blkaddr = NULL_ADDR;
@@ -3573,7 +3573,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
     /*
      * We should check this at this moment to avoid deadlock on inode page
      * and #0 page. The locking rule for inline_data conversion should be:
-     * lock_page(page #0) -> lock_page(inode_page)
+     * folio_lock(folio #0) -> folio_lock(inode_page)
      */
     if (index != 0) {
         err = f2fs_convert_inline_inode(inode);
@@ -3584,18 +3584,20 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 #ifdef CONFIG_F2FS_FS_COMPRESSION
     if (f2fs_compressed_file(inode)) {
         int ret;
+        struct page *page;

         *fsdata = NULL;

         if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
             goto repeat;

-        ret = f2fs_prepare_compress_overwrite(inode, pagep,
+        ret = f2fs_prepare_compress_overwrite(inode, &page,
                             index, fsdata);
         if (ret < 0) {
             err = ret;
             goto fail;
         } else if (ret) {
+            *foliop = page_folio(page);
             return 0;
         }
     }
@@ -3603,81 +3605,85 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,

 repeat:
     /*
-     * Do not use grab_cache_page_write_begin() to avoid deadlock due to
-     * wait_for_stable_page. Will wait that below with our IO control.
+     * Do not use FGP_STABLE to avoid deadlock.
+     * Will wait that below with our IO control.
      */
-    page = f2fs_pagecache_get_page(mapping, index,
+    folio = __filemap_get_folio(mapping, index,
                 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
-    if (!page) {
-        err = -ENOMEM;
+    if (IS_ERR(folio)) {
+        err = PTR_ERR(folio);
         goto fail;
     }

     /* TODO: cluster can be compressed due to race with .writepage */

-    *pagep = page;
+    *foliop = folio;

     if (f2fs_is_atomic_file(inode))
-        err = prepare_atomic_write_begin(sbi, page, pos, len,
+        err = prepare_atomic_write_begin(sbi, &folio->page, pos, len,
                     &blkaddr, &need_balance, &use_cow);
     else
-        err = prepare_write_begin(sbi, page, pos, len,
+        err = prepare_write_begin(sbi, &folio->page, pos, len,
                     &blkaddr, &need_balance);
     if (err)
-        goto fail;
+        goto put_folio;

     if (need_balance && !IS_NOQUOTA(inode) &&
             has_not_enough_free_secs(sbi, 0, 0)) {
-        unlock_page(page);
+        folio_unlock(folio);
         f2fs_balance_fs(sbi, true);
-        lock_page(page);
-        if (page->mapping != mapping) {
-            /* The page got truncated from under us */
-            f2fs_put_page(page, 1);
+        folio_lock(folio);
+        if (folio->mapping != mapping) {
+            /* The folio got truncated from under us */
+            folio_unlock(folio);
+            folio_put(folio);
             goto repeat;
         }
     }

-    f2fs_wait_on_page_writeback(page, DATA, false, true);
+    f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);

-    if (len == PAGE_SIZE || PageUptodate(page))
+    if (len == folio_size(folio) || folio_test_uptodate(folio))
         return 0;

     if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
         !f2fs_verity_in_progress(inode)) {
-        zero_user_segment(page, len, PAGE_SIZE);
+        folio_zero_segment(folio, len, PAGE_SIZE);
         return 0;
     }

     if (blkaddr == NEW_ADDR) {
-        zero_user_segment(page, 0, PAGE_SIZE);
-        SetPageUptodate(page);
+        folio_zero_segment(folio, 0, folio_size(folio));
+        folio_mark_uptodate(folio);
     } else {
         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                 DATA_GENERIC_ENHANCE_READ)) {
             err = -EFSCORRUPTED;
-            goto fail;
+            goto put_folio;
         }
         err = f2fs_submit_page_read(use_cow ?
-                F2FS_I(inode)->cow_inode : inode, page,
+                F2FS_I(inode)->cow_inode : inode, &folio->page,
                 blkaddr, 0, true);
         if (err)
-            goto fail;
+            goto put_folio;

-        lock_page(page);
-        if (unlikely(page->mapping != mapping)) {
-            f2fs_put_page(page, 1);
+        folio_lock(folio);
+        if (unlikely(folio->mapping != mapping)) {
+            folio_unlock(folio);
+            folio_put(folio);
             goto repeat;
         }
-        if (unlikely(!PageUptodate(page))) {
+        if (unlikely(!folio_test_uptodate(folio))) {
             err = -EIO;
-            goto fail;
+            goto put_folio;
         }
     }
     return 0;

+put_folio:
+    folio_unlock(folio);
+    folio_put(folio);
 fail:
-    f2fs_put_page(page, 1);
     f2fs_write_failed(inode, pos + len);
     return err;
 }
@@ -3685,9 +3691,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 static int f2fs_write_end(struct file *file,
             struct address_space *mapping,
             loff_t pos, unsigned len, unsigned copied,
-            struct page *page, void *fsdata)
+            struct folio *folio, void *fsdata)
 {
-    struct inode *inode = page->mapping->host;
+    struct inode *inode = folio->mapping->host;

     trace_f2fs_write_end(inode, pos, len, copied);

@@ -3696,17 +3702,17 @@ static int f2fs_write_end(struct file *file,
      * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
      * let generic_perform_write() try to copy data again through copied=0.
      */
-    if (!PageUptodate(page)) {
+    if (!folio_test_uptodate(folio)) {
         if (unlikely(copied != len))
             copied = 0;
         else
-            SetPageUptodate(page);
+            folio_mark_uptodate(folio);
     }

 #ifdef CONFIG_F2FS_FS_COMPRESSION
     /* overwrite compressed file */
     if (f2fs_compressed_file(inode) && fsdata) {
-        f2fs_compress_write_end(inode, fsdata, page->index, copied);
+        f2fs_compress_write_end(inode, fsdata, folio->index, copied);
         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

         if (pos + copied > i_size_read(inode) &&
@@ -3719,7 +3725,7 @@ static int f2fs_write_end(struct file *file,
     if (!copied)
         goto unlock_out;

-    set_page_dirty(page);
+    folio_mark_dirty(folio);

     if (pos + copied > i_size_read(inode) &&
         !f2fs_verity_in_progress(inode)) {
@@ -3729,7 +3735,8 @@ static int f2fs_write_end(struct file *file,
                 pos + copied);
     }
 unlock_out:
-    f2fs_put_page(page, 1);
+    folio_unlock(folio);
+    folio_put(folio);
     f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
     return copied;
 }

@@ -2677,7 +2677,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
     const struct address_space_operations *a_ops = mapping->a_ops;
     int offset = off & (sb->s_blocksize - 1);
     size_t towrite = len;
-    struct page *page;
+    struct folio *folio;
     void *fsdata = NULL;
     int err = 0;
     int tocopy;
@@ -2687,7 +2687,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
                             towrite);
 retry:
         err = a_ops->write_begin(NULL, mapping, off, tocopy,
-                            &page, &fsdata);
+                            &folio, &fsdata);
         if (unlikely(err)) {
             if (err == -ENOMEM) {
                 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
@@ -2697,10 +2697,10 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
             break;
         }

-        memcpy_to_page(page, offset, data, tocopy);
+        memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);

         a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
-                        page, fsdata);
+                        folio, fsdata);
         offset = 0;
         towrite -= tocopy;
         off += tocopy;
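The f2fs conversion above preserves a classic pagecache idiom: whenever the folio lock is dropped and re-taken, folio->mapping must be re-checked, because truncation may have removed the folio from the file while it was unlocked. A sketch of the revalidation step in isolation (not code from this commit):

#include <linux/pagemap.h>

/* Re-lock a folio after it was temporarily unlocked; returns false
 * (with the reference dropped) if it was truncated out of the mapping,
 * in which case the caller should retry the pagecache lookup. */
static bool folio_relock_checked(struct folio *folio,
                                 struct address_space *mapping)
{
    folio_lock(folio);
    if (folio->mapping != mapping) {
        /* Truncated from under us: drop it and let the caller retry. */
        folio_unlock(folio);
        folio_put(folio);
        return false;
    }
    return true;
}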
@@ -80,17 +80,17 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
     while (count) {
         size_t n = min_t(size_t, count,
                  PAGE_SIZE - offset_in_page(pos));
-        struct page *page;
+        struct folio *folio;
         void *fsdata = NULL;
         int res;

-        res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+        res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
         if (res)
             return res;

-        memcpy_to_page(page, offset_in_page(pos), buf, n);
+        memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);

-        res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
+        res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
         if (res < 0)
             return res;
         if (res != n)
@@ -221,13 +221,12 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)

 static int fat_write_begin(struct file *file, struct address_space *mapping,
             loff_t pos, unsigned len,
-            struct page **pagep, void **fsdata)
+            struct folio **foliop, void **fsdata)
 {
     int err;

-    *pagep = NULL;
     err = cont_write_begin(file, mapping, pos, len,
-                pagep, fsdata, fat_get_block,
+                foliop, fsdata, fat_get_block,
                 &MSDOS_I(mapping->host)->mmu_private);
     if (err < 0)
         fat_write_failed(mapping, pos + len);
@@ -236,11 +235,11 @@ static int fat_write_begin(struct file *file, struct address_space *mapping,

 static int fat_write_end(struct file *file, struct address_space *mapping,
             loff_t pos, unsigned len, unsigned copied,
-            struct page *pagep, void *fsdata)
+            struct folio *folio, void *fsdata)
 {
     struct inode *inode = mapping->host;
     int err;
-    err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+    err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
     if (err < len)
         fat_write_failed(mapping, pos + len);
     if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
@ -2393,76 +2393,77 @@ static int fuse_writepages(struct address_space *mapping,
* but how to implement it without killing performance need more thinking.
*/
static int fuse_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
struct page *page;
struct folio *folio;
loff_t fsize;
int err = -ENOMEM;

WARN_ON(!fc->writeback_cache);

page = grab_cache_page_write_begin(mapping, index);
if (!page)
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto error;

fuse_wait_on_page_writeback(mapping->host, page->index);
fuse_wait_on_page_writeback(mapping->host, folio->index);

if (PageUptodate(page) || len == PAGE_SIZE)
if (folio_test_uptodate(folio) || len >= folio_size(folio))
goto success;
/*
* Check if the start this page comes after the end of file, in which
* case the readpage can be optimized away.
* Check if the start of this folio comes after the end of file,
* in which case the readpage can be optimized away.
*/
fsize = i_size_read(mapping->host);
if (fsize <= (pos & PAGE_MASK)) {
size_t off = pos & ~PAGE_MASK;
if (fsize <= folio_pos(folio)) {
size_t off = offset_in_folio(folio, pos);
if (off)
zero_user_segment(page, 0, off);
folio_zero_segment(folio, 0, off);
goto success;
}
err = fuse_do_readpage(file, page);
err = fuse_do_readpage(file, &folio->page);
if (err)
goto cleanup;
success:
*pagep = page;
*foliop = folio;
return 0;

cleanup:
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);
error:
return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;

/* Haven't copied anything? Skip zeroing, size extending, dirtying. */
if (!copied)
goto unlock;

pos += copied;
if (!PageUptodate(page)) {
if (!folio_test_uptodate(folio)) {
/* Zero any unwritten bytes at the end of the page */
size_t endoff = pos & ~PAGE_MASK;
if (endoff)
zero_user_segment(page, endoff, PAGE_SIZE);
SetPageUptodate(page);
folio_zero_segment(folio, endoff, PAGE_SIZE);
folio_mark_uptodate(folio);
}

if (pos > inode->i_size)
i_size_write(inode, pos);

set_page_dirty(page);
folio_mark_dirty(folio);

unlock:
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);

return copied;
}

@ -487,15 +487,15 @@ void hfs_file_truncate(struct inode *inode)
if (inode->i_size > HFS_I(inode)->phys_size) {
struct address_space *mapping = inode->i_mapping;
void *fsdata = NULL;
struct page *page;
struct folio *folio;

/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
res = hfs_write_begin(NULL, mapping, size + 1, 0, &page,
res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio,
&fsdata);
if (!res) {
res = generic_write_end(NULL, mapping, size + 1, 0, 0,
page, fsdata);
folio, fsdata);
}
if (res)
inode->i_size = HFS_I(inode)->phys_size;

@ -202,7 +202,7 @@ extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;

int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata);
loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);

@ -45,12 +45,11 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}

int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;

*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))

@ -554,16 +554,16 @@ void hfsplus_file_truncate(struct inode *inode)

if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct folio *folio;
void *fsdata = NULL;
loff_t size = inode->i_size;

res = hfsplus_write_begin(NULL, mapping, size, 0,
&page, &fsdata);
&folio, &fsdata);
if (res)
return;
res = generic_write_end(NULL, mapping, size, 0, 0,
page, fsdata);
folio, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);

@ -472,7 +472,7 @@ extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;

int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata);
loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);

@ -39,12 +39,11 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}

int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;

*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))

@ -465,31 +465,32 @@ static int hostfs_read_folio(struct file *file, struct folio *folio)

static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;

*pagep = grab_cache_page_write_begin(mapping, index);
if (!*pagep)
*foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (!*foliop)
return -ENOMEM;
return 0;
}

static int hostfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
void *buffer;
unsigned from = pos & (PAGE_SIZE - 1);
size_t from = offset_in_folio(folio, pos);
int err;

buffer = kmap_local_page(page);
err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
buffer = kmap_local_folio(folio, from);
err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied);
kunmap_local(buffer);

if (!PageUptodate(page) && err == PAGE_SIZE)
SetPageUptodate(page);
if (!folio_test_uptodate(folio) && err == folio_size(folio))
folio_mark_uptodate(folio);

/*
* If err > 0, write_file has added err to pos, so we are comparing
@ -497,8 +498,8 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
*/
if (err > 0 && (pos > inode->i_size))
inode->i_size = pos;
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);

return err;
}

@ -190,12 +190,11 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)

static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
int ret;

*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@ -206,11 +205,11 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,

static int hpfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pagep, void *fsdata)
struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len)
hpfs_write_failed(mapping, pos + len);
if (!(err < 0)) {

@ -388,14 +388,14 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
BUG();
return -EINVAL;

@ -900,7 +900,7 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t bh_written;

bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
len, copied, &folio->page, NULL);
len, copied, folio, NULL);
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
return bh_written == copied;
}

@ -23,10 +23,10 @@

static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata);
struct folio *folio, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
struct folio **foliop, void **fsdata);
static int jffs2_read_folio(struct file *filp, struct folio *folio);

int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
@ -77,29 +77,27 @@ const struct address_space_operations jffs2_file_address_operations =
.write_end = jffs2_write_end,
};

static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
unsigned char *pg_buf;
unsigned char *kaddr;
int ret;

jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
__func__, inode->i_ino, pg->index << PAGE_SHIFT);
__func__, inode->i_ino, folio->index << PAGE_SHIFT);

BUG_ON(!PageLocked(pg));
BUG_ON(!folio_test_locked(folio));

pg_buf = kmap(pg);
/* FIXME: Can kmap fail? */

ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
kaddr = kmap_local_folio(folio, 0);
ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT,
PAGE_SIZE);
kunmap_local(kaddr);

if (!ret)
SetPageUptodate(pg);
folio_mark_uptodate(folio);

flush_dcache_page(pg);
kunmap(pg);
flush_dcache_folio(folio);

jffs2_dbg(2, "readpage finished\n");
return ret;
@ -107,7 +105,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)

int __jffs2_read_folio(struct file *file, struct folio *folio)
{
int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page);
int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
folio_unlock(folio);
return ret;
}
@ -125,9 +123,9 @@ static int jffs2_read_folio(struct file *file, struct folio *folio)

static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
struct page *pg;
struct folio *folio;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
@ -206,29 +204,30 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* page in read_cache_page(), which causes a deadlock.
*/
mutex_lock(&c->alloc_sem);
pg = grab_cache_page_write_begin(mapping, index);
if (!pg) {
ret = -ENOMEM;
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
goto release_sem;
}
*pagep = pg;
*foliop = folio;

/*
* Read in the page if it wasn't already present. Cannot optimize away
* the whole page write case until jffs2_write_end can handle the
* Read in the folio if it wasn't already present. Cannot optimize away
* the whole folio write case until jffs2_write_end can handle the
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
if (!folio_test_uptodate(folio)) {
mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
ret = jffs2_do_readpage_nolock(inode, folio);
mutex_unlock(&f->sem);
if (ret) {
unlock_page(pg);
put_page(pg);
folio_unlock(folio);
folio_put(folio);
goto release_sem;
}
}
jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);

release_sem:
mutex_unlock(&c->alloc_sem);
@ -238,7 +237,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,

static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata)
struct folio *folio, void *fsdata)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
@ -252,16 +251,17 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
unsigned aligned_start = start & ~3;
int ret = 0;
uint32_t writtenlen = 0;
void *buf;

jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
__func__, inode->i_ino, pg->index << PAGE_SHIFT,
start, end, pg->flags);
jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
__func__, inode->i_ino, folio_pos(folio),
start, end, folio->flags);

/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So the page must be
jffs2_garbage_collect_pass(). So the folio must be
up to date to prevent page_cache_read() from trying
to re-lock it. */
BUG_ON(!PageUptodate(pg));
BUG_ON(!folio_test_uptodate(folio));

if (end == PAGE_SIZE) {
/* When writing out the end of a page, write out the
@ -276,8 +276,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
if (!ri) {
jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
__func__);
unlock_page(pg);
put_page(pg);
folio_unlock(folio);
folio_put(folio);
return -ENOMEM;
}

@ -289,15 +289,11 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());

/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
hurt to do it again. The alternative is ifdefs, which are ugly. */
kmap(pg);

ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
(pg->index << PAGE_SHIFT) + aligned_start,
buf = kmap_local_folio(folio, aligned_start);
ret = jffs2_write_inode_range(c, f, ri, buf,
folio_pos(folio) + aligned_start,
end - aligned_start, &writtenlen);

kunmap(pg);
kunmap_local(buf);

if (ret)
mapping_set_error(mapping, ret);
@ -323,12 +319,12 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
it gets reread */
jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
__func__);
ClearPageUptodate(pg);
folio_clear_uptodate(folio);
}

jffs2_dbg(1, "%s() returning %d\n",
__func__, writtenlen > 0 ? writtenlen : ret);
unlock_page(pg);
put_page(pg);
folio_unlock(folio);
folio_put(folio);
return writtenlen > 0 ? writtenlen : ret;
}

@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
struct page *page;
struct folio *folio;
unsigned char *pg_ptr;

memset(&ri, 0, sizeof(ri));
@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}

/* The rules state that we must obtain the page lock *before* f->sem, so
/* The rules state that we must obtain the folio lock *before* f->sem, so
* drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
* actually going to *change* so we're safe; we only allow reading.
*
* It is important to note that jffs2_write_begin() will ensure that its
* page is marked Uptodate before allocating space. That means that if we
* end up here trying to GC the *same* page that jffs2_write_begin() is
* trying to write out, read_cache_page() will not deadlock. */
* folio is marked uptodate before allocating space. That means that if we
* end up here trying to GC the *same* folio that jffs2_write_begin() is
* trying to write out, read_cache_folio() will not deadlock. */
mutex_unlock(&f->sem);
page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
__jffs2_read_folio, NULL);
if (IS_ERR(page)) {
pr_warn("read_cache_page() returned error: %ld\n",
PTR_ERR(page));
if (IS_ERR(folio)) {
pr_warn("read_cache_folio() returned error: %ld\n",
PTR_ERR(folio));
mutex_lock(&f->sem);
return PTR_ERR(page);
return PTR_ERR(folio);
}

pg_ptr = kmap(page);
pg_ptr = kmap_local_folio(folio, 0);
mutex_lock(&f->sem);

offset = start;
@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
}
}

kunmap(page);
put_page(page);
folio_release_kmap(folio, pg_ptr);
return ret;
}

@ -292,11 +292,11 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)

static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
int ret;

ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);

@ -304,12 +304,12 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
}

static int jfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, struct page *page,
loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata)
{
int ret;

ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
jfs_write_failed(mapping, pos + len);
return ret;

fs/libfs.c
@ -914,7 +914,7 @@ static int simple_read_folio(struct file *file, struct folio *folio)

int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
struct folio *folio;

@ -923,7 +923,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
if (IS_ERR(folio))
return PTR_ERR(folio);

*pagep = &folio->page;
*foliop = folio;

if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
size_t from = offset_in_folio(folio, pos);
@ -942,11 +942,11 @@ EXPORT_SYMBOL(simple_write_begin);
* @pos: "
* @len: "
* @copied: "
* @page: "
* @folio: "
* @fsdata: "
*
* simple_write_end does the minimum needed for updating a page after writing is
* done. It has the same API signature as the .write_end of
* simple_write_end does the minimum needed for updating a folio after
* writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for
* FSes that don't need any other processing. i_mutex is assumed to be held.
* Block based filesystems should use generic_write_end().
@ -959,9 +959,8 @@ EXPORT_SYMBOL(simple_write_begin);
*/
static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;

fs/minix/dir.c
@ -40,18 +40,18 @@ minix_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}

static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
struct address_space *mapping = page->mapping;
struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;

block_write_end(NULL, mapping, pos, len, len, page, NULL);
block_write_end(NULL, mapping, pos, len, len, folio, NULL);

if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
unlock_page(page);
folio_unlock(folio);
}

static int minix_handle_dirsync(struct inode *dir)
@ -64,14 +64,15 @@ static int minix_handle_dirsync(struct inode *dir)
return err;
}

static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p)
static void *dir_get_folio(struct inode *dir, unsigned long n,
struct folio **foliop)
{
struct address_space *mapping = dir->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page))
return ERR_CAST(page);
*p = page;
return kmap_local_page(page);
struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);

if (IS_ERR(folio))
return ERR_CAST(folio);
*foliop = folio;
return kmap_local_folio(folio, 0);
}

static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
@ -99,9 +100,9 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)

for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit;
struct page *page;
struct folio *folio;

kaddr = dir_get_page(inode, n, &page);
kaddr = dir_get_folio(inode, n, &folio);
if (IS_ERR(kaddr))
continue;
p = kaddr+offset;
@ -122,13 +123,13 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
unsigned l = strnlen(name, sbi->s_namelen);
if (!dir_emit(ctx, name, l,
inumber, DT_UNKNOWN)) {
unmap_and_put_page(page, p);
folio_release_kmap(folio, p);
return 0;
}
}
ctx->pos += chunk_size;
}
unmap_and_put_page(page, kaddr);
folio_release_kmap(folio, kaddr);
}
return 0;
}
@ -144,12 +145,13 @@ static inline int namecompare(int len, int maxlen,
/*
* minix_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the cache buffer in which the entry was found, and the entry
* itself (as a parameter - res_dir). It does NOT read the inode of the
* finds an entry in the specified directory with the wanted name.
* It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
*
* On Success folio_release_kmap() should be called on *foliop.
*/
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
@ -158,17 +160,15 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
struct minix_sb_info * sbi = minix_sb(sb);
unsigned long n;
unsigned long npages = dir_pages(dir);
struct page *page = NULL;
char *p;

char *namx;
__u32 inumber;
*res_page = NULL;

for (n = 0; n < npages; n++) {
char *kaddr, *limit;

kaddr = dir_get_page(dir, n, &page);
kaddr = dir_get_folio(dir, n, foliop);
if (IS_ERR(kaddr))
continue;

@ -188,12 +188,11 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto found;
}
unmap_and_put_page(page, kaddr);
folio_release_kmap(*foliop, kaddr);
}
return NULL;

found:
*res_page = page;
return (minix_dirent *)p;
}

@ -204,7 +203,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
int namelen = dentry->d_name.len;
struct super_block * sb = dir->i_sb;
struct minix_sb_info * sbi = minix_sb(sb);
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long npages = dir_pages(dir);
unsigned long n;
char *kaddr, *p;
@ -223,10 +222,10 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
for (n = 0; n <= npages; n++) {
char *limit, *dir_end;

kaddr = dir_get_page(dir, n, &page);
kaddr = dir_get_folio(dir, n, &folio);
if (IS_ERR(kaddr))
return PTR_ERR(kaddr);
lock_page(page);
folio_lock(folio);
dir_end = kaddr + minix_last_byte(dir, n);
limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
@ -253,15 +252,15 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto out_unlock;
}
unlock_page(page);
unmap_and_put_page(page, kaddr);
folio_unlock(folio);
folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;

got_it:
pos = page_offset(page) + offset_in_page(p);
err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
pos = folio_pos(folio) + offset_in_folio(folio, p);
err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err)
goto out_unlock;
memcpy (namx, name, namelen);
@ -272,37 +271,37 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
de->inode = inode->i_ino;
}
dir_commit_chunk(page, pos, sbi->s_dirsize);
dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = minix_handle_dirsync(dir);
out_put:
unmap_and_put_page(page, kaddr);
folio_release_kmap(folio, kaddr);
return err;
out_unlock:
unlock_page(page);
folio_unlock(folio);
goto out_put;
}

int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
{
struct inode *inode = page->mapping->host;
loff_t pos = page_offset(page) + offset_in_page(de);
struct inode *inode = folio->mapping->host;
loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
unsigned len = sbi->s_dirsize;
int err;

lock_page(page);
err = minix_prepare_chunk(page, pos, len);
folio_lock(folio);
err = minix_prepare_chunk(folio, pos, len);
if (err) {
unlock_page(page);
folio_unlock(folio);
return err;
}
if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = 0;
else
de->inode = 0;
dir_commit_chunk(page, pos, len);
dir_commit_chunk(folio, pos, len);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return minix_handle_dirsync(inode);
@ -310,21 +309,21 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)

int minix_make_empty(struct inode *inode, struct inode *dir)
{
struct page *page = grab_cache_page(inode->i_mapping, 0);
struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *kaddr;
int err;

if (!page)
return -ENOMEM;
err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
if (IS_ERR(folio))
return PTR_ERR(folio);
err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
if (err) {
unlock_page(page);
folio_unlock(folio);
goto fail;
}

kaddr = kmap_local_page(page);
memset(kaddr, 0, PAGE_SIZE);
kaddr = kmap_local_folio(folio, 0);
memset(kaddr, 0, folio_size(folio));

if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)kaddr;
@ -345,10 +344,10 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
}
kunmap_local(kaddr);

dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
err = minix_handle_dirsync(inode);
fail:
put_page(page);
folio_put(folio);
return err;
}

@ -357,7 +356,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
*/
int minix_empty_dir(struct inode * inode)
{
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *name, *kaddr;
@ -366,7 +365,7 @@ int minix_empty_dir(struct inode * inode)
for (i = 0; i < npages; i++) {
char *p, *limit;

kaddr = dir_get_page(inode, i, &page);
kaddr = dir_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
continue;

@ -395,44 +394,44 @@ int minix_empty_dir(struct inode * inode)
goto not_empty;
}
}
unmap_and_put_page(page, kaddr);
folio_release_kmap(folio, kaddr);
}
return 1;

not_empty:
unmap_and_put_page(page, kaddr);
folio_release_kmap(folio, kaddr);
return 0;
}

/* Releases the page */
int minix_set_link(struct minix_dir_entry *de, struct page *page,
int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode)
{
struct inode *dir = page->mapping->host;
struct inode *dir = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
loff_t pos = page_offset(page) + offset_in_page(de);
loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err;

lock_page(page);
err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
folio_lock(folio);
err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err) {
unlock_page(page);
folio_unlock(folio);
return err;
}
if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = inode->i_ino;
else
de->inode = inode->i_ino;
dir_commit_chunk(page, pos, sbi->s_dirsize);
dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return minix_handle_dirsync(dir);
}

struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
{
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
struct minix_dir_entry *de = dir_get_page(dir, 0, p);
struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);

if (!IS_ERR(de))
return minix_next_entry(de, sbi);
@ -441,20 +440,19 @@ struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)

ino_t minix_inode_by_name(struct dentry *dentry)
{
struct page *page;
struct minix_dir_entry *de = minix_find_entry(dentry, &page);
struct folio *folio;
struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
ino_t res = 0;

if (de) {
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct inode *inode = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(inode->i_sb);

if (sbi->s_version == MINIX_V3)
res = ((minix3_dirent *) de)->inode;
else
res = de->inode;
unmap_and_put_page(page, de);
folio_release_kmap(folio, de);
}
return res;
}

@ -427,9 +427,9 @@ static int minix_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, minix_get_block);
}

int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
return __block_write_begin(page, pos, len, minix_get_block);
return __block_write_begin(folio, pos, len, minix_get_block);
}

static void minix_write_failed(struct address_space *mapping, loff_t to)
@ -444,11 +444,11 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)

static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)
{
int ret;

ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
if (unlikely(ret))
minix_write_failed(mapping, pos + len);

@ -42,18 +42,18 @@ struct minix_sb_info {
unsigned short s_version;
};

extern struct inode *minix_iget(struct super_block *, unsigned long);
extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct inode * minix_new_inode(const struct inode *, umode_t);
extern void minix_free_inode(struct inode * inode);
extern unsigned long minix_count_free_inodes(struct super_block *sb);
extern int minix_new_block(struct inode * inode);
extern void minix_free_block(struct inode *inode, unsigned long block);
extern unsigned long minix_count_free_blocks(struct super_block *sb);
extern int minix_getattr(struct mnt_idmap *, const struct path *,
struct inode *minix_iget(struct super_block *, unsigned long);
struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
struct inode *minix_new_inode(const struct inode *, umode_t);
void minix_free_inode(struct inode *inode);
unsigned long minix_count_free_inodes(struct super_block *sb);
int minix_new_block(struct inode *inode);
void minix_free_block(struct inode *inode, unsigned long block);
unsigned long minix_count_free_blocks(struct super_block *sb);
int minix_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);

extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *);
@ -64,15 +64,15 @@ extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
extern unsigned V1_minix_blocks(loff_t, struct super_block *);
extern unsigned V2_minix_blocks(loff_t, struct super_block *);

extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**);
extern int minix_add_link(struct dentry*, struct inode*);
extern int minix_delete_entry(struct minix_dir_entry*, struct page*);
extern int minix_make_empty(struct inode*, struct inode*);
extern int minix_empty_dir(struct inode*);
int minix_set_link(struct minix_dir_entry *de, struct page *page,
struct minix_dir_entry *minix_find_entry(struct dentry *, struct folio **);
int minix_add_link(struct dentry*, struct inode*);
int minix_delete_entry(struct minix_dir_entry *, struct folio *);
int minix_make_empty(struct inode*, struct inode*);
int minix_empty_dir(struct inode*);
int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode);
extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
extern ino_t minix_inode_by_name(struct dentry*);
struct minix_dir_entry *minix_dotdot(struct inode*, struct folio **);
ino_t minix_inode_by_name(struct dentry*);

extern const struct inode_operations minix_file_inode_operations;
extern const struct inode_operations minix_dir_inode_operations;

@ -141,15 +141,15 @@ static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
static int minix_unlink(struct inode * dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
struct page * page;
struct folio *folio;
struct minix_dir_entry * de;
int err;

de = minix_find_entry(dentry, &page);
de = minix_find_entry(dentry, &folio);
if (!de)
return -ENOENT;
err = minix_delete_entry(de, page);
unmap_and_put_page(page, de);
err = minix_delete_entry(de, folio);
folio_release_kmap(folio, de);

if (err)
return err;
@ -180,28 +180,28 @@ static int minix_rename(struct mnt_idmap *idmap,
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL;
struct folio * dir_folio = NULL;
struct minix_dir_entry * dir_de = NULL;
struct page * old_page;
struct folio *old_folio;
struct minix_dir_entry * old_de;
int err = -ENOENT;

if (flags & ~RENAME_NOREPLACE)
return -EINVAL;

old_de = minix_find_entry(old_dentry, &old_page);
old_de = minix_find_entry(old_dentry, &old_folio);
if (!old_de)
goto out;

if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
dir_de = minix_dotdot(old_inode, &dir_page);
dir_de = minix_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}

if (new_inode) {
struct page * new_page;
struct folio *new_folio;
struct minix_dir_entry * new_de;

err = -ENOTEMPTY;
@ -209,11 +209,11 @@ static int minix_rename(struct mnt_idmap *idmap,
goto out_dir;

err = -ENOENT;
new_de = minix_find_entry(new_dentry, &new_page);
new_de = minix_find_entry(new_dentry, &new_folio);
if (!new_de)
goto out_dir;
err = minix_set_link(new_de, new_page, old_inode);
unmap_and_put_page(new_page, new_de);
err = minix_set_link(new_de, new_folio, old_inode);
folio_release_kmap(new_folio, new_de);
if (err)
goto out_dir;
inode_set_ctime_current(new_inode);
@ -228,22 +228,22 @@ static int minix_rename(struct mnt_idmap *idmap,
inode_inc_link_count(new_dir);
}

err = minix_delete_entry(old_de, old_page);
err = minix_delete_entry(old_de, old_folio);
if (err)
goto out_dir;

mark_inode_dirty(old_inode);

if (dir_de) {
err = minix_set_link(dir_de, dir_page, new_dir);
err = minix_set_link(dir_de, dir_folio, new_dir);
if (!err)
inode_dec_link_count(old_dir);
}
out_dir:
if (dir_de)
unmap_and_put_page(dir_page, dir_de);
folio_release_kmap(dir_folio, dir_de);
out_old:
unmap_and_put_page(old_page, old_de);
folio_release_kmap(old_folio, old_de);
out:
return err;
}

fs/namei.c
@ -5351,7 +5351,7 @@ int page_symlink(struct inode *inode, const char *symname, int len)
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct page *page;
struct folio *folio;
void *fsdata = NULL;
int err;
unsigned int flags;
@ -5359,16 +5359,16 @@ int page_symlink(struct inode *inode, const char *symname, int len)
retry:
if (nofs)
flags = memalloc_nofs_save();
err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata);
err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
if (nofs)
memalloc_nofs_restore(flags);
if (err)
goto fail;

memcpy(page_address(page), symname, len-1);
memcpy(folio_address(folio), symname, len - 1);

err = aops->write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
folio, fsdata);
if (err < 0)
goto fail;
if (err < len-1)

@ -336,7 +336,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
* increment the page use counts until he is done with the page.
*/
static int nfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep,
loff_t pos, unsigned len, struct folio **foliop,
void **fsdata)
{
fgf_t fgp = FGP_WRITEBEGIN;
@ -353,7 +353,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
*pagep = &folio->page;
*foliop = folio;

ret = nfs_flush_incompatible(file, folio);
if (ret) {
@ -372,10 +372,9 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,

static int nfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct folio *folio = page_folio(page);
unsigned offset = offset_in_folio(folio, pos);
int status;

@ -83,7 +83,7 @@ static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
{
loff_t pos = folio_pos(folio) + from;

return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block);
return __block_write_begin(folio, pos, to - from, nilfs_get_block);
}

static void nilfs_commit_chunk(struct folio *folio,
@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
int err;

nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
if (IS_DIRSYNC(dir))

@ -250,7 +250,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
struct folio **foliop, void **fsdata)

{
struct inode *inode = mapping->host;
@ -259,7 +259,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err))
return err;

err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
if (unlikely(err)) {
nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb);
@ -269,16 +269,16 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,

static int nilfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
unsigned int start = pos & (PAGE_SIZE - 1);
unsigned int nr_dirty;
int err;

nr_dirty = nilfs_page_count_clean_buffers(page, start,
nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
start + copied);
copied = generic_write_end(file, mapping, pos, len, copied, page,
copied = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
nilfs_set_file_dirty(inode, nr_dirty);
err = nilfs_transaction_commit(inode->i_sb);

@ -498,7 +498,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
struct inode *inode;
struct nilfs_recovery_block *rb, *n;
unsigned int blocksize = nilfs->ns_blocksize;
struct page *page;
struct folio *folio;
loff_t pos;
int err = 0, err2 = 0;

@ -512,7 +512,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,

pos = rb->blkoff << inode->i_blkbits;
err = block_write_begin(inode->i_mapping, pos, blocksize,
&page, nilfs_get_block);
&folio, nilfs_get_block);
if (unlikely(err)) {
loff_t isize = inode->i_size;

@ -522,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode;
}

err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page);
if (unlikely(err))
goto failed_page;

@ -531,17 +531,17 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_page;

block_write_end(NULL, inode->i_mapping, pos, blocksize,
blocksize, page, NULL);
blocksize, folio, NULL);

unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);

(*nr_salvaged_blocks)++;
goto next;

failed_page:
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);

failed_inode:
nilfs_warn(sb,

@ -182,7 +182,7 @@ static int ntfs_extend_initialized_size(struct file *file,
|
||||
|
||||
for (;;) {
|
||||
u32 zerofrom, len;
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
u8 bits;
|
||||
CLST vcn, lcn, clen;
|
||||
|
||||
@ -208,14 +208,13 @@ static int ntfs_extend_initialized_size(struct file *file,
|
||||
if (pos + len > new_valid)
|
||||
len = new_valid - pos;
|
||||
|
||||
err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
|
||||
err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
zero_user_segment(page, zerofrom, PAGE_SIZE);
|
||||
folio_zero_range(folio, zerofrom, folio_size(folio));
|
||||
|
||||
/* This function in any case puts page. */
|
||||
err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
|
||||
err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
pos += len;
|
||||
|
@ -901,7 +901,7 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
|
||||
}
|
||||
|
||||
int ntfs_write_begin(struct file *file, struct address_space *mapping,
|
||||
loff_t pos, u32 len, struct page **pagep, void **fsdata)
|
||||
loff_t pos, u32 len, struct folio **foliop, void **fsdata)
|
||||
{
|
||||
int err;
|
||||
struct inode *inode = mapping->host;
|
||||
@ -910,7 +910,6 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
|
||||
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
|
||||
return -EIO;
|
||||
|
||||
*pagep = NULL;
|
||||
if (is_resident(ni)) {
|
||||
struct folio *folio = __filemap_get_folio(
|
||||
			mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
@ -926,7 +925,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
		ni_unlock(ni);

	if (!err) {
		*pagep = &folio->page;
		*foliop = folio;
		goto out;
	}
	folio_unlock(folio);
@ -936,7 +935,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
		goto out;
	}

	err = block_write_begin(mapping, pos, len, pagep,
	err = block_write_begin(mapping, pos, len, foliop,
				ntfs_get_block_write_begin);

out:
@ -947,9 +946,8 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
 * ntfs_write_end - Address_space_operations::write_end.
 */
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
		   u32 len, u32 copied, struct page *page, void *fsdata)
		   u32 len, u32 copied, struct folio *folio, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
@ -979,7 +977,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
		folio_unlock(folio);
		folio_put(folio);
	} else {
		err = generic_write_end(file, mapping, pos, len, copied, page,
		err = generic_write_end(file, mapping, pos, len, copied, folio,
					fsdata);
	}

@ -1008,45 +1006,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
	return err;
}

int reset_log_file(struct inode *inode)
{
	int err;
	loff_t pos = 0;
	u32 log_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;

	for (;;) {
		u32 len;
		void *kaddr;
		struct page *page;

		len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;

		err = block_write_begin(mapping, pos, len, &page,
					ntfs_get_block_write_begin);
		if (err)
			goto out;

		kaddr = kmap_atomic(page);
		memset(kaddr, -1, len);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
		if (err < 0)
			goto out;
		pos += len;

		if (pos >= log_size)
			break;
		balance_dirty_pages_ratelimited(mapping);
	}
out:
	mark_inode_dirty_sync(inode);

	return err;
}

int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);

@ -708,13 +708,12 @@ int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
			 const struct cpu_str *name);
int ntfs_set_size(struct inode *inode, u64 new_size);
int reset_log_file(struct inode *inode);
int ntfs_get_block(struct inode *inode, sector_t vbn,
		   struct buffer_head *bh_result, int create);
int ntfs_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, u32 len, struct page **pagep, void **fsdata);
		     loff_t pos, u32 len, struct folio **foliop, void **fsdata);
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
		   u32 len, u32 copied, struct page *page, void *fsdata);
		   u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
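With reset_log_file() gone, the ntfs3 paths above no longer touch struct page at all. For the simpler buffer-layer filesystems in this series, the converted pair of address_space operations reduces to very little code. A minimal sketch of the post-conversion contract — the filesystem name and get_block callback here are hypothetical placeholders, not from this series:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	/* block_write_begin() now hands back a locked folio through *foliop */
	return block_write_begin(mapping, pos, len, foliop, myfs_get_block);
}

static int myfs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	/* generic_write_end() marks the folio uptodate/dirty, unlocks and drops it */
	return generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
}

This is exactly the shape the omfs and sysv hunks take further down.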
@ -1643,7 +1643,7 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, ocfs2_write_type_t type,
			     struct page **pagep, void **fsdata,
			     struct folio **foliop, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
@ -1826,8 +1826,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
		ocfs2_free_alloc_context(meta_ac);

success:
	if (pagep)
		*pagep = wc->w_target_page;
	if (foliop)
		*foliop = page_folio(wc->w_target_page);
	*fsdata = wc;
	return 0;
out_quota:
@ -1879,7 +1879,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,

static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
			     struct folio **foliop, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
@ -1901,7 +1901,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
				       pagep, fsdata, di_bh, NULL);
				       foliop, fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
@ -2076,7 +2076,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,

static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
			   struct folio *folio, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

@ -38,7 +38,7 @@ typedef enum {

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, ocfs2_write_type_t type,
			     struct page **pagep, void **fsdata,
			     struct folio **foliop, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page);

int ocfs2_read_inline_data(struct inode *inode, struct page *page,

@ -755,7 +755,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct folio *folio;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
@ -774,9 +774,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
		goto out;
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		mlog_errno(ret);
		goto out_commit_trans;
	}
@ -803,7 +804,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
		ret = __block_write_begin(folio, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
@ -812,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,


		/* must not update i_size! */
		block_commit_write(page, block_start + 1, block_start + 1);
		block_commit_write(&folio->page, block_start + 1, block_start + 1);
	}

	/*
@ -833,8 +834,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
	}

out_unlock:
	unlock_page(page);
	put_page(page);
	folio_unlock(folio);
	folio_put(folio);
out_commit_trans:
	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

@ -53,7 +53,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_SIZE;
	pgoff_t last_index;
	struct page *locked_page = NULL;
	struct folio *locked_folio = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

@ -91,7 +91,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
		len = ((size - 1) & ~PAGE_MASK) + 1;

	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
				       &locked_page, &fsdata, di_bh, page);
				       &locked_folio, &fsdata, di_bh, page);
	if (err) {
		if (err != -ENOSPC)
			mlog_errno(err);
@ -99,7 +99,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
		goto out;
	}

	if (!locked_page) {
	if (!locked_folio) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
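Note the allocation idiom in ocfs2_write_zero_page(): the conversion calls __filemap_get_folio() directly with the flags find_or_create_page() used to imply, and the failure convention changes from a NULL page to an ERR_PTR(). A side-by-side sketch of just that change, lifted from the hunk above:

	/* before: NULL on failure */
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	/* after: ERR_PTR() on failure, same lock and reference semantics */
	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(folio))
		return PTR_ERR(folio);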
@ -312,11 +312,11 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to)

static int omfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
			    struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block);
	ret = block_write_begin(mapping, pos, len, foliop, omfs_get_block);
	if (unlikely(ret))
		omfs_write_failed(mapping, pos + len);
@ -309,22 +309,18 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)

static int orangefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, void **fsdata)
		struct folio **foliop, void **fsdata)
{
	struct orangefs_write_range *wr;
	struct folio *folio;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;
	folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	*pagep = page;
	folio = page_folio(page);
	*foliop = folio;

	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
@ -365,9 +361,10 @@ static int orangefs_write_begin(struct file *file,
}

static int orangefs_write_end(struct file *file, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
    loff_t pos, unsigned len, unsigned copied, struct folio *folio,
    void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct inode *inode = folio->mapping->host;
	loff_t last_pos = pos + copied;

	/*
@ -377,23 +374,23 @@ static int orangefs_write_end(struct file *file, struct address_space *mapping,
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
	/* zero the stale part of the folio if we did a short copy */
	if (!folio_test_uptodate(folio)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
			folio_zero_range(folio, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		if (pos == folio_pos(folio) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
			folio_zero_segment(folio, from + copied, PAGE_SIZE);
			folio_mark_uptodate(folio);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
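The orangefs_write_end() conversion above is almost entirely a one-for-one helper swap, and the same pairs recur in most of the filesystems below (old page helper on the left, folio form on the right):

	zero_user(page, off, len)          ->  folio_zero_range(folio, off, len)
	zero_user_segment(page, from, to)  ->  folio_zero_segment(folio, from, to)
	page_offset(page)                  ->  folio_pos(folio)
	SetPageUptodate(page)              ->  folio_mark_uptodate(folio)
	set_page_dirty(page)               ->  folio_mark_dirty(folio)
	unlock_page(page); put_page(page)  ->  folio_unlock(folio); folio_put(folio)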
@ -24,13 +24,15 @@ static unsigned qnx6_lfile_checksum(char *name, unsigned size)
	return crc;
}

static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
static void *qnx6_get_folio(struct inode *dir, unsigned long n,
		struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page))
		kmap(page);
	return page;
	struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);

	if (IS_ERR(folio))
		return folio;
	*foliop = folio;
	return kmap_local_folio(folio, 0);
}

static unsigned last_entry(struct inode *inode, unsigned long page_nr)
@ -44,19 +46,20 @@ static unsigned last_entry(struct inode *inode, unsigned long page_nr)

static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
						struct qnx6_long_dir_entry *de,
						struct page **p)
						struct folio **foliop)
{
	struct qnx6_sb_info *sbi = QNX6_SB(sb);
	u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
	u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
	/* within page */
	u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
	u32 offs;
	struct address_space *mapping = sbi->longfile->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (IS_ERR(page))
		return ERR_CAST(page);
	kmap(*p = page);
	return (struct qnx6_long_filename *)(page_address(page) + offs);
	struct folio *folio = read_mapping_folio(mapping, n, NULL);

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	offs = offset_in_folio(folio, s << sb->s_blocksize_bits);
	*foliop = folio;
	return kmap_local_folio(folio, offs);
}

static int qnx6_dir_longfilename(struct inode *inode,
@ -67,7 +70,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
	struct qnx6_long_filename *lf;
	struct super_block *s = inode->i_sb;
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	struct page *page;
	struct folio *folio;
	int lf_size;

	if (de->de_size != 0xff) {
@ -76,7 +79,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
		pr_err("invalid direntry size (%i).\n", de->de_size);
		return 0;
	}
	lf = qnx6_longname(s, de, &page);
	lf = qnx6_longname(s, de, &folio);
	if (IS_ERR(lf)) {
		pr_err("Error reading longname\n");
		return 0;
@ -87,7 +90,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
	if (lf_size > QNX6_LONG_NAME_MAX) {
		pr_debug("file %s\n", lf->lf_fname);
		pr_err("Filename too long (%i)\n", lf_size);
		qnx6_put_page(page);
		folio_release_kmap(folio, lf);
		return 0;
	}

@ -100,11 +103,11 @@ static int qnx6_dir_longfilename(struct inode *inode,
	pr_debug("qnx6_readdir:%.*s inode:%u\n",
		 lf_size, lf->lf_fname, de_inode);
	if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
		qnx6_put_page(page);
		folio_release_kmap(folio, lf);
		return 0;
	}

	qnx6_put_page(page);
	folio_release_kmap(folio, lf);
	/* success */
	return 1;
}
@ -117,26 +120,27 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
	loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
	unsigned long npages = dir_pages(inode);
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
	unsigned offset = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
	bool done = false;

	ctx->pos = pos;
	if (ctx->pos >= inode->i_size)
		return 0;

	for ( ; !done && n < npages; n++, start = 0) {
		struct page *page = qnx6_get_page(inode, n);
		int limit = last_entry(inode, n);
	for ( ; !done && n < npages; n++, offset = 0) {
		struct qnx6_dir_entry *de;
		int i = start;
		struct folio *folio;
		char *kaddr = qnx6_get_folio(inode, n, &folio);
		char *limit;

		if (IS_ERR(page)) {
		if (IS_ERR(kaddr)) {
			pr_err("%s(): read failed\n", __func__);
			ctx->pos = (n + 1) << PAGE_SHIFT;
			return PTR_ERR(page);
			return PTR_ERR(kaddr);
		}
		de = ((struct qnx6_dir_entry *)page_address(page)) + start;
		for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
		de = (struct qnx6_dir_entry *)(kaddr + offset);
		limit = kaddr + last_entry(inode, n);
		for (; (char *)de < limit; de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
			int size = de->de_size;
			u32 no_inode = fs32_to_cpu(sbi, de->de_inode);

@ -164,7 +168,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
				}
			}
		}
		qnx6_put_page(page);
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
@ -177,23 +181,23 @@ static unsigned qnx6_long_match(int len, const char *name,
{
	struct super_block *s = dir->i_sb;
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	struct page *page;
	struct folio *folio;
	int thislen;
	struct qnx6_long_filename *lf = qnx6_longname(s, de, &page);
	struct qnx6_long_filename *lf = qnx6_longname(s, de, &folio);

	if (IS_ERR(lf))
		return 0;

	thislen = fs16_to_cpu(sbi, lf->lf_size);
	if (len != thislen) {
		qnx6_put_page(page);
		folio_release_kmap(folio, lf);
		return 0;
	}
	if (memcmp(name, lf->lf_fname, len) == 0) {
		qnx6_put_page(page);
		folio_release_kmap(folio, lf);
		return fs32_to_cpu(sbi, de->de_inode);
	}
	qnx6_put_page(page);
	folio_release_kmap(folio, lf);
	return 0;
}

@ -210,20 +214,17 @@ static unsigned qnx6_match(struct super_block *s, int len, const char *name,
}


unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
			 struct page **res_page)
unsigned qnx6_find_ino(int len, struct inode *dir, const char *name)
{
	struct super_block *s = dir->i_sb;
	struct qnx6_inode_info *ei = QNX6_I(dir);
	struct page *page = NULL;
	struct folio *folio;
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	unsigned ino;
	struct qnx6_dir_entry *de;
	struct qnx6_long_dir_entry *lde;

	*res_page = NULL;

	if (npages == 0)
		return 0;
	start = ei->i_dir_start_lookup;
@ -232,12 +233,11 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
	n = start;

	do {
		page = qnx6_get_page(dir, n);
		if (!IS_ERR(page)) {
		de = qnx6_get_folio(dir, n, &folio);
		if (!IS_ERR(de)) {
			int limit = last_entry(dir, n);
			int i;

			de = (struct qnx6_dir_entry *)page_address(page);
			for (i = 0; i < limit; i++, de++) {
				if (len <= QNX6_SHORT_NAME_MAX) {
					/* short filename */
@ -256,7 +256,7 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
				} else
					pr_err("undefined filename size in inode.\n");
			}
			qnx6_put_page(page);
			folio_release_kmap(folio, de - i);
		}

		if (++n >= npages)
@ -265,8 +265,8 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
	return 0;

found:
	*res_page = page;
	ei->i_dir_start_lookup = n;
	folio_release_kmap(folio, de);
	return ino;
}
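The qnx6 helpers also move from the global kmap()/kunmap() pair to local mappings: a caller now receives the mapped address directly and must hand both the address and the folio back to folio_release_kmap(), which undoes the kmap_local and drops the reference. A usage sketch with the identifiers from the hunks above (error handling trimmed):

	struct folio *folio;
	struct qnx6_dir_entry *de = qnx6_get_folio(dir, n, &folio);

	if (!IS_ERR(de)) {
		/* ... walk the entries through the mapped address ... */
		folio_release_kmap(folio, de);	/* kunmap_local() + folio_put() */
	}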
@ -184,17 +184,17 @@ static const char *qnx6_checkroot(struct super_block *s)
	struct qnx6_dir_entry *dir_entry;
	struct inode *root = d_inode(s->s_root);
	struct address_space *mapping = root->i_mapping;
	struct page *page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
	struct folio *folio = read_mapping_folio(mapping, 0, NULL);

	if (IS_ERR(folio))
		return "error reading root directory";
	kmap(page);
	dir_entry = page_address(page);
	dir_entry = kmap_local_folio(folio, 0);
	for (i = 0; i < 2; i++) {
		/* maximum 3 bytes - due to match_root limitation */
		if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
			error = 1;
	}
	qnx6_put_page(page);
	folio_release_kmap(folio, dir_entry);
	if (error)
		return "error reading root directory.";
	return NULL;
@ -518,7 +518,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
	struct inode *inode;
	struct qnx6_inode_info *ei;
	struct address_space *mapping;
	struct page *page;
	struct folio *folio;
	u32 n, offs;

	inode = iget_locked(sb, ino);
@ -538,17 +538,16 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
		return ERR_PTR(-EIO);
	}
	n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
	offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
	mapping = sbi->inodes->i_mapping;
	page = read_mapping_page(mapping, n, NULL);
	if (IS_ERR(page)) {
	folio = read_mapping_folio(mapping, n, NULL);
	if (IS_ERR(folio)) {
		pr_err("major problem: unable to read inode from dev %s\n",
		       sb->s_id);
		iget_failed(inode);
		return ERR_CAST(page);
		return ERR_CAST(folio);
	}
	kmap(page);
	raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs;
	offs = offset_in_folio(folio, (ino - 1) << QNX6_INODE_SIZE_BITS);
	raw_inode = kmap_local_folio(folio, offs);

	inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode);
	i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
@ -578,7 +577,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
		inode->i_mapping->a_ops = &qnx6_aops;
	} else
		init_special_inode(inode, inode->i_mode, 0);
	qnx6_put_page(page);
	folio_release_kmap(folio, raw_inode);
	unlock_new_inode(inode);
	return inode;
}

@ -17,7 +17,6 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
			   unsigned int flags)
{
	unsigned ino;
	struct page *page;
	struct inode *foundinode = NULL;
	const char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
@ -25,10 +24,9 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
	if (len > QNX6_LONG_NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	ino = qnx6_find_entry(len, dir, name, &page);
	ino = qnx6_find_ino(len, dir, name);
	if (ino) {
		foundinode = qnx6_iget(dir->i_sb, ino);
		qnx6_put_page(page);
		if (IS_ERR(foundinode))
			pr_debug("lookup->iget -> error %ld\n",
				 PTR_ERR(foundinode));

@ -126,11 +126,4 @@ static inline __fs16 cpu_to_fs16(struct qnx6_sb_info *sbi, __u16 n)
extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
						    int silent);

static inline void qnx6_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}

extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
				struct page **res_page);
unsigned qnx6_find_ino(int len, struct inode *dir, const char *name);
@ -2178,7 +2178,7 @@ static int grab_tail_page(struct inode *inode,
	unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	struct folio *folio;
	int error;

	/*
@ -2190,20 +2190,20 @@ static int grab_tail_page(struct inode *inode,
	if ((offset & (blocksize - 1)) == 0) {
		return -ENOENT;
	}
	page = grab_cache_page(inode->i_mapping, index);
	error = -ENOMEM;
	if (!page) {
		goto out;
	}
	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(inode->i_mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* start within the page of the last block in the file */
	start = (offset / blocksize) * blocksize;

	error = __block_write_begin(page, start, offset - start,
	error = __block_write_begin(folio, start, offset - start,
				    reiserfs_get_block_create_0);
	if (error)
		goto unlock;

	head = page_buffers(page);
	head = folio_buffers(folio);
	bh = head;
	do {
		if (pos >= start) {
@ -2226,14 +2226,13 @@ static int grab_tail_page(struct inode *inode,
		goto unlock;
	}
	*bh_result = bh;
	*page_result = page;
	*page_result = &folio->page;

out:
	return error;

unlock:
	unlock_page(page);
	put_page(page);
	folio_unlock(folio);
	folio_put(folio);
	return error;
}

@ -2736,23 +2735,24 @@ static void reiserfs_truncate_failed_write(struct inode *inode)
static int reiserfs_write_begin(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len,
				struct page **pagep, void **fsdata)
				struct folio **foliop, void **fsdata)
{
	struct inode *inode;
	struct page *page;
	struct folio *folio;
	pgoff_t index;
	int ret;
	int old_ref = 0;

	inode = mapping->host;
	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

	reiserfs_wait_on_write_block(inode->i_sb);
	fix_tail_page_for_writing(page);
	fix_tail_page_for_writing(&folio->page);
	if (reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th;
		th = (struct reiserfs_transaction_handle *)current->
@ -2762,7 +2762,7 @@ static int reiserfs_write_begin(struct file *file,
		old_ref = th->t_refcount;
		th->t_refcount++;
	}
	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
	ret = __block_write_begin(folio, pos, len, reiserfs_get_block);
	if (ret && reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th = current->journal_info;
		/*
@ -2792,8 +2792,8 @@ static int reiserfs_write_begin(struct file *file,
		}
	}
	if (ret) {
		unlock_page(page);
		put_page(page);
		folio_unlock(folio);
		folio_put(folio);
		/* Truncate allocated blocks */
		reiserfs_truncate_failed_write(inode);
	}
@ -2822,7 +2822,7 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
		th->t_refcount++;
	}

	ret = __block_write_begin(page, from, len, reiserfs_get_block);
	ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block);
	if (ret && reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th = current->journal_info;
		/*
@ -2862,10 +2862,9 @@ static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)

static int reiserfs_write_end(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
			      struct folio *folio, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct inode *inode = folio->mapping->host;
	int ret = 0;
	int update_sd = 0;
	struct reiserfs_transaction_handle *th;
@ -2887,7 +2886,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
	}
	flush_dcache_folio(folio);

	reiserfs_commit_page(inode, page, start, start + copied);
	reiserfs_commit_page(inode, &folio->page, start, start + copied);

	/*
	 * generic_commit_write does this for us, but does not update the
@ -2942,8 +2941,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
out:
	if (locked)
		reiserfs_write_unlock(inode->i_sb);
	unlock_page(page);
	put_page(page);
	folio_unlock(folio);
	folio_put(folio);

	if (pos + len > inode->i_size)
		reiserfs_truncate_failed_write(inode);
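__block_write_begin() now takes the folio itself, so a legacy page-based entry point like __reiserfs_write_begin() bridges with page_folio() at the call site instead of converting the whole function, as the hunk above shows:

	ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block);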
@ -494,39 +494,73 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
}

static int squashfs_readahead_fragment(struct page **page,
	unsigned int pages, unsigned int expected)
	unsigned int pages, unsigned int expected, loff_t start)
{
	struct inode *inode = page[0]->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int error = buffer->error;
	int i, bytes, copied;
	struct squashfs_page_actor *actor;
	unsigned int offset;
	void *addr;
	struct page *last_page;

	if (error)
	if (buffer->error)
		goto out;

	expected += squashfs_i(inode)->fragment_offset;
	actor = squashfs_page_actor_init_special(msblk, page, pages,
						 expected, start);
	if (!actor)
		goto out;

	for (n = 0; n < pages; n++) {
		unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
		unsigned int offset = base + squashfs_i(inode)->fragment_offset;
	squashfs_actor_nobuff(actor);
	addr = squashfs_first_page(actor);

		if (expected > offset) {
			unsigned int avail = min_t(unsigned int, expected -
				offset, PAGE_SIZE);
	for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
		int avail = min_t(int, expected - offset, PAGE_SIZE);

			squashfs_fill_page(page[n], buffer, offset, avail);
		if (!IS_ERR(addr)) {
			bytes = squashfs_copy_data(addr, buffer, offset +
				squashfs_i(inode)->fragment_offset, avail);

			if (bytes != avail)
				goto failed;
		}

		unlock_page(page[n]);
		put_page(page[n]);
		copied += avail;
		addr = squashfs_next_page(actor);
	}

	last_page = squashfs_page_actor_free(actor);

	if (copied == expected && !IS_ERR(last_page)) {
		/* Last page (if present) may have trailing bytes not filled */
		bytes = copied % PAGE_SIZE;
		if (bytes && last_page)
			memzero_page(last_page, bytes, PAGE_SIZE - bytes);

		for (i = 0; i < pages; i++) {
			flush_dcache_page(page[i]);
			SetPageUptodate(page[i]);
		}
	}

	for (i = 0; i < pages; i++) {
		unlock_page(page[i]);
		put_page(page[i]);
	}

	squashfs_cache_put(buffer);
	return 0;

failed:
	squashfs_page_actor_free(actor);

out:
	squashfs_cache_put(buffer);
	return error;
	return 1;
}

static void squashfs_readahead(struct readahead_control *ractl)
@ -551,7 +585,6 @@ static void squashfs_readahead(struct readahead_control *ractl)
		return;

	for (;;) {
		pgoff_t index;
		int res, bsize;
		u64 block = 0;
		unsigned int expected;
@ -570,26 +603,21 @@ static void squashfs_readahead(struct readahead_control *ractl)
		if (readahead_pos(ractl) >= i_size_read(inode))
			goto skip_pages;

		index = pages[0]->index >> shift;

		if ((pages[nr_pages - 1]->index >> shift) != index)
			goto skip_pages;

		if (index == file_end && squashfs_i(inode)->fragment_block !=
						SQUASHFS_INVALID_BLK) {
		if (start >> msblk->block_log == file_end &&
		    squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
			res = squashfs_readahead_fragment(pages, nr_pages,
							  expected);
							  expected, start);
			if (res)
				goto skip_pages;
			continue;
		}

		bsize = read_blocklist(inode, index, &block);
		bsize = read_blocklist(inode, start >> msblk->block_log, &block);
		if (bsize == 0)
			goto skip_pages;

		actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
							 expected);
							 expected, start);
		if (!actor)
			goto skip_pages;

@ -597,12 +625,12 @@ static void squashfs_readahead(struct readahead_control *ractl)

		last_page = squashfs_page_actor_free(actor);

		if (res == expected) {
		if (res == expected && !IS_ERR(last_page)) {
			int bytes;

			/* Last page (if present) may have trailing bytes not filled */
			bytes = res % PAGE_SIZE;
			if (index == file_end && bytes && last_page)
			if (start >> msblk->block_log == file_end && bytes && last_page)
				memzero_page(last_page, bytes,
					     PAGE_SIZE - bytes);

@ -616,6 +644,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
			unlock_page(pages[i]);
			put_page(pages[i]);
		}

		start += readahead_batch_length(ractl);
	}

	kfree(pages);

@ -23,15 +23,15 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	int expected)

{
	struct folio *folio = page_folio(target_page);
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	loff_t start_index = target_page->index & ~mask;
	loff_t start_index = folio->index & ~mask;
	loff_t end_index = start_index | mask;
	int i, n, pages, bytes, res = -ENOMEM;
	struct page **page;
	struct page **page, *last_page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

@ -46,7 +46,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,

	/* Try to grab all the pages covered by the Squashfs block */
	for (i = 0, n = start_index; n <= end_index; n++) {
		page[i] = (n == target_page->index) ? target_page :
		page[i] = (n == folio->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL)
@ -67,27 +67,28 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
	actor = squashfs_page_actor_init_special(msblk, page, pages, expected,
						 start_index << PAGE_SHIFT);
	if (actor == NULL)
		goto out;

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

	squashfs_page_actor_free(actor);
	last_page = squashfs_page_actor_free(actor);

	if (res < 0)
		goto mark_errored;

	if (res != expected) {
	if (res != expected || IS_ERR(last_page)) {
		res = -EIO;
		goto mark_errored;
	}

	/* Last page (if present) may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (page[pages - 1]->index == end_index && bytes) {
		pageaddr = kmap_local_page(page[pages - 1]);
	if (end_index == file_end && last_page && bytes) {
		pageaddr = kmap_local_page(last_page);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(pageaddr);
	}

@ -60,6 +60,11 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}

/* Implementation of page_actor for decompressing directly into page cache. */
static loff_t page_next_index(struct squashfs_page_actor *actor)
{
	return page_folio(actor->page[actor->next_page])->index;
}

static void *handle_next_page(struct squashfs_page_actor *actor)
{
	int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
@ -68,7 +73,7 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
		return NULL;

	if ((actor->next_page == actor->pages) ||
	    (actor->next_index != actor->page[actor->next_page]->index)) {
	    (actor->next_index != page_next_index(actor))) {
		actor->next_index++;
		actor->returned_pages++;
		actor->last_page = NULL;
@ -103,7 +108,7 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
}

struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
	struct page **page, int pages, int length)
	struct page **page, int pages, int length, loff_t start_index)
{
	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);

@ -125,7 +130,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
	actor->pages = pages;
	actor->next_page = 0;
	actor->returned_pages = 0;
	actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
	actor->next_index = start_index >> PAGE_SHIFT;
	actor->pageaddr = NULL;
	actor->last_page = NULL;
	actor->alloc_buffer = msblk->decompressor->alloc_buffer;

@ -29,13 +29,15 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
				int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
				struct squashfs_sb_info *msblk,
				struct page **page, int pages, int length);
				struct page **page, int pages, int length,
				loff_t start_index);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
	struct page *last_page = actor->last_page;
	struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO);

	kfree(actor->tmp_buffer);
	kfree(actor);

	return last_page;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
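squashfs_page_actor_free() now doubles as a completion check: if the actor is torn down before handing out every page it covers, the caller receives ERR_PTR(-EIO) in place of the last page. All three callers above therefore test the return value before trusting last_page, e.g. in squashfs_readpage_block():

	last_page = squashfs_page_actor_free(actor);
	...
	if (res != expected || IS_ERR(last_page)) {
		res = -EIO;
		goto mark_errored;
	}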
156	fs/sysv/dir.c
@ -28,17 +28,17 @@ const struct file_operations sysv_dir_operations = {
	.fsync		= generic_file_fsync,
};

static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	unlock_page(page);
	folio_unlock(folio);
}

static int sysv_handle_dirsync(struct inode *dir)
@ -52,20 +52,21 @@ static int sysv_handle_dirsync(struct inode *dir)
}

/*
 * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the
 * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
 * rules documented in mm/highmem.rst.
 *
 * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_page()
 * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_folio()
 * and must be treated accordingly for nesting purposes.
 */
static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p)
static void *dir_get_folio(struct inode *dir, unsigned long n,
		struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (IS_ERR(page))
		return ERR_CAST(page);
	*p = page;
	return kmap_local_page(page);
	struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	*foliop = folio;
	return kmap_local_folio(folio, 0);
}

static int sysv_readdir(struct file *file, struct dir_context *ctx)
@ -87,9 +88,9 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct sysv_dir_entry *de;
		struct page *page;
		struct folio *folio;

		kaddr = dir_get_page(inode, n, &page);
		kaddr = dir_get_folio(inode, n, &folio);
		if (IS_ERR(kaddr))
			continue;
		de = (struct sysv_dir_entry *)(kaddr+offset);
@ -103,11 +104,11 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
			if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
					fs16_to_cpu(SYSV_SB(sb), de->inode),
					DT_UNKNOWN)) {
				unmap_and_put_page(page, kaddr);
				folio_release_kmap(folio, kaddr);
				return 0;
			}
		}
		unmap_and_put_page(page, kaddr);
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
@ -126,39 +127,35 @@ static inline int namecompare(int len, int maxlen,
/*
 *	sysv_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * finds an entry in the specified directory with the wanted name.
 * It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * On Success unmap_and_put_page() should be called on *res_page.
 * On Success folio_release_kmap() should be called on *foliop.
 *
 * sysv_find_entry() acts as a call to dir_get_page() and must be treated
 * sysv_find_entry() acts as a call to dir_get_folio() and must be treated
 * accordingly for nesting purposes.
 */
struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page)
struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct folio **foliop)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = d_inode(dentry->d_parent);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct sysv_dir_entry *de;

	*res_page = NULL;

	start = SYSV_I(dir)->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;

	do {
		char *kaddr = dir_get_page(dir, n, &page);
		char *kaddr = dir_get_folio(dir, n, foliop);

		if (!IS_ERR(kaddr)) {
			de = (struct sysv_dir_entry *)kaddr;
			kaddr += PAGE_SIZE - SYSV_DIRSIZE;
			kaddr += folio_size(*foliop) - SYSV_DIRSIZE;
			for ( ; (char *) de <= kaddr ; de++) {
				if (!de->inode)
					continue;
@ -166,7 +163,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
						name, de->name))
					goto found;
			}
			unmap_and_put_page(page, kaddr);
			folio_release_kmap(*foliop, kaddr);
		}

		if (++n >= npages)
@ -177,7 +174,6 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_

found:
	SYSV_I(dir)->i_dir_start_lookup = n;
	*res_page = page;
	return de;
}

@ -186,7 +182,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
	struct inode *dir = d_inode(dentry->d_parent);
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct page *page = NULL;
	struct folio *folio = NULL;
	struct sysv_dir_entry * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
@ -196,7 +192,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)

	/* We take care of directory expansion in the same loop */
	for (n = 0; n <= npages; n++) {
		kaddr = dir_get_page(dir, n, &page);
		kaddr = dir_get_folio(dir, n, &folio);
		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		de = (struct sysv_dir_entry *)kaddr;
@ -206,49 +202,49 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
				goto out_page;
				goto out_folio;
			de++;
		}
		unmap_and_put_page(page, kaddr);
		folio_release_kmap(folio, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + offset_in_page(de);
	lock_page(page);
	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	folio_lock(folio);
	err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
	if (err)
		goto out_unlock;
	memcpy (de->name, name, namelen);
	memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
	dir_commit_chunk(page, pos, SYSV_DIRSIZE);
	dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	err = sysv_handle_dirsync(dir);
out_page:
	unmap_and_put_page(page, kaddr);
out_folio:
	folio_release_kmap(folio, kaddr);
	return err;
out_unlock:
	unlock_page(page);
	goto out_page;
	folio_unlock(folio);
	goto out_folio;
}

int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
int sysv_delete_entry(struct sysv_dir_entry *de, struct folio *folio)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = page_offset(page) + offset_in_page(de);
	struct inode *inode = folio->mapping->host;
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	int err;

	lock_page(page);
	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
	folio_lock(folio);
	err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
	if (err) {
		unlock_page(page);
		folio_unlock(folio);
		return err;
	}
	de->inode = 0;
	dir_commit_chunk(page, pos, SYSV_DIRSIZE);
	dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	return sysv_handle_dirsync(inode);
@ -256,33 +252,33 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)

int sysv_make_empty(struct inode *inode, struct inode *dir)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
	struct sysv_dir_entry * de;
	char *base;
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = sysv_prepare_chunk(folio, 0, 2 * SYSV_DIRSIZE);
	if (err) {
		unlock_page(page);
		folio_unlock(folio);
		goto fail;
	}
	base = kmap_local_page(page);
	memset(base, 0, PAGE_SIZE);
	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, folio_size(folio));

	de = (struct sysv_dir_entry *) base;
	de = (struct sysv_dir_entry *)kaddr;
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
	strcpy(de->name,".");
	de++;
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
	strcpy(de->name,"..");

	kunmap_local(base);
	dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
	kunmap_local(kaddr);
	dir_commit_chunk(folio, 0, 2 * SYSV_DIRSIZE);
	err = sysv_handle_dirsync(inode);
fail:
	put_page(page);
	folio_put(folio);
	return err;
}

@ -292,19 +288,19 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
int sysv_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	struct folio *folio = NULL;
	unsigned long i, npages = dir_pages(inode);
	char *kaddr;

	for (i = 0; i < npages; i++) {
		struct sysv_dir_entry *de;

		kaddr = dir_get_page(inode, i, &page);
		kaddr = dir_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr))
			continue;

		de = (struct sysv_dir_entry *)kaddr;
		kaddr += PAGE_SIZE-SYSV_DIRSIZE;
		kaddr += folio_size(folio) - SYSV_DIRSIZE;

		for ( ;(char *)de <= kaddr; de++) {
			if (!de->inode)
@ -321,46 +317,46 @@ int sysv_empty_dir(struct inode * inode)
			if (de->name[1] != '.' || de->name[2])
				goto not_empty;
		}
		unmap_and_put_page(page, kaddr);
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	unmap_and_put_page(page, kaddr);
	folio_release_kmap(folio, kaddr);
	return 0;
}

/* Releases the page */
int sysv_set_link(struct sysv_dir_entry *de, struct page *page,
int sysv_set_link(struct sysv_dir_entry *de, struct folio *folio,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	loff_t pos = page_offset(page) + offset_in_page(de);
	struct inode *dir = folio->mapping->host;
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	int err;

	lock_page(page);
	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
	folio_lock(folio);
	err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
	if (err) {
		unlock_page(page);
		folio_unlock(folio);
		return err;
	}
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
	dir_commit_chunk(page, pos, SYSV_DIRSIZE);
	dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	return sysv_handle_dirsync(inode);
}

/*
 * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the
 * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
 * rules documented in mm/highmem.rst.
 *
 * sysv_dotdot() acts as a call to dir_get_page() and must be treated
 * sysv_dotdot() acts as a call to dir_get_folio() and must be treated
 * accordingly for nesting purposes.
 */
struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p)
struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct folio **foliop)
{
	struct sysv_dir_entry *de = dir_get_page(dir, 0, p);
	struct sysv_dir_entry *de = dir_get_folio(dir, 0, foliop);

	if (IS_ERR(de))
		return NULL;
@ -370,13 +366,13 @@ struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p)

ino_t sysv_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
	struct folio *folio;
	struct sysv_dir_entry *de = sysv_find_entry (dentry, &folio);
	ino_t res = 0;

	if (de) {
		res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
		unmap_and_put_page(page, de);
		folio_release_kmap(folio, de);
	}
	return res;
}
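A detail worth noting in the sysv directory code: byte positions are now derived from the folio rather than from PAGE_SIZE arithmetic, which keeps the code correct if the mapping ever holds large folios. The idiom used throughout the converted dir.c:

	/* absolute file position of a directory entry within its folio */
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);

together with folio_size() bounding the entry walks in place of PAGE_SIZE.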
@ -466,9 +466,9 @@ static int sysv_read_folio(struct file *file, struct folio *folio)
	return block_read_full_folio(folio, get_block);
}

int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, get_block);
	return __block_write_begin(folio, pos, len, get_block);
}

static void sysv_write_failed(struct address_space *mapping, loff_t to)
@ -483,11 +483,11 @@ static void sysv_write_failed(struct address_space *mapping, loff_t to)

static int sysv_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
			struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, get_block);
	ret = block_write_begin(mapping, pos, len, foliop, get_block);
	if (unlikely(ret))
		sysv_write_failed(mapping, pos + len);
@ -151,20 +151,20 @@ static int sysv_mkdir(struct mnt_idmap *idmap, struct inode *dir,
static int sysv_unlink(struct inode * dir, struct dentry * dentry)
{
	struct inode * inode = d_inode(dentry);
	struct page * page;
	struct folio *folio;
	struct sysv_dir_entry * de;
	int err;

	de = sysv_find_entry(dentry, &page);
	de = sysv_find_entry(dentry, &folio);
	if (!de)
		return -ENOENT;

	err = sysv_delete_entry(de, page);
	err = sysv_delete_entry(de, folio);
	if (!err) {
		inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
		inode_dec_link_count(inode);
	}
	unmap_and_put_page(page, de);
	folio_release_kmap(folio, de);
	return err;
}

@ -194,28 +194,28 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{
	struct inode * old_inode = d_inode(old_dentry);
	struct inode * new_inode = d_inode(new_dentry);
	struct page * dir_page = NULL;
	struct folio *dir_folio;
	struct sysv_dir_entry * dir_de = NULL;
	struct page * old_page;
	struct folio *old_folio;
	struct sysv_dir_entry * old_de;
	int err = -ENOENT;

	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	old_de = sysv_find_entry(old_dentry, &old_page);
	old_de = sysv_find_entry(old_dentry, &old_folio);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = sysv_dotdot(old_inode, &dir_page);
		dir_de = sysv_dotdot(old_inode, &dir_folio);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page * new_page;
		struct folio *new_folio;
		struct sysv_dir_entry * new_de;

		err = -ENOTEMPTY;
@ -223,11 +223,11 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
			goto out_dir;

		err = -ENOENT;
		new_de = sysv_find_entry(new_dentry, &new_page);
		new_de = sysv_find_entry(new_dentry, &new_folio);
		if (!new_de)
			goto out_dir;
		err = sysv_set_link(new_de, new_page, old_inode);
		unmap_and_put_page(new_page, new_de);
		err = sysv_set_link(new_de, new_folio, old_inode);
		folio_release_kmap(new_folio, new_de);
		if (err)
			goto out_dir;
		inode_set_ctime_current(new_inode);
@ -242,23 +242,23 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
		inode_inc_link_count(new_dir);
	}

	err = sysv_delete_entry(old_de, old_page);
	err = sysv_delete_entry(old_de, old_folio);
	if (err)
		goto out_dir;

	mark_inode_dirty(old_inode);

	if (dir_de) {
		err = sysv_set_link(dir_de, dir_page, new_dir);
		err = sysv_set_link(dir_de, dir_folio, new_dir);
		if (!err)
			inode_dec_link_count(old_dir);
	}

out_dir:
	if (dir_de)
		unmap_and_put_page(dir_page, dir_de);
		folio_release_kmap(dir_folio, dir_de);
out_old:
	unmap_and_put_page(old_page, old_de);
	folio_release_kmap(old_folio, old_de);
out:
	return err;
}
@ -133,8 +133,8 @@ extern void sysv_free_block(struct super_block *, sysv_zone_t);
extern unsigned long sysv_count_free_blocks(struct super_block *);

/* itree.c */
extern void sysv_truncate(struct inode *);
extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len);
void sysv_truncate(struct inode *);
int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);

/* inode.c */
extern struct inode *sysv_iget(struct super_block *, unsigned int);
@ -148,15 +148,15 @@ extern void sysv_destroy_icache(void);


/* dir.c */
extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **);
extern int sysv_add_link(struct dentry *, struct inode *);
extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *);
extern int sysv_make_empty(struct inode *, struct inode *);
extern int sysv_empty_dir(struct inode *);
extern int sysv_set_link(struct sysv_dir_entry *, struct page *,
struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct folio **);
int sysv_add_link(struct dentry *, struct inode *);
int sysv_delete_entry(struct sysv_dir_entry *, struct folio *);
int sysv_make_empty(struct inode *, struct inode *);
int sysv_empty_dir(struct inode *);
int sysv_set_link(struct sysv_dir_entry *, struct folio *,
		  struct inode *);
extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **);
extern ino_t sysv_inode_by_name(struct dentry *);
struct sysv_dir_entry *sysv_dotdot(struct inode *, struct folio **);
ino_t sysv_inode_by_name(struct dentry *);


extern const struct inode_operations sysv_file_inode_operations;
@ -211,7 +211,7 @@ static void release_existing_page_budget(struct ubifs_info *c)
|
||||
}
|
||||
|
||||
static int write_begin_slow(struct address_space *mapping,
|
||||
loff_t pos, unsigned len, struct page **pagep)
|
||||
loff_t pos, unsigned len, struct folio **foliop)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct ubifs_info *c = inode->i_sb->s_fs_info;
|
||||
@ -298,7 +298,7 @@ static int write_begin_slow(struct address_space *mapping,
|
||||
ubifs_release_dirty_inode_budget(c, ui);
|
||||
}
|
||||
|
||||
*pagep = &folio->page;
|
||||
*foliop = folio;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -414,7 +414,7 @@ static int allocate_budget(struct ubifs_info *c, struct folio *folio,
|
||||
*/
|
||||
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
|
||||
loff_t pos, unsigned len,
|
||||
struct page **pagep, void **fsdata)
|
||||
struct folio **foliop, void **fsdata)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct ubifs_info *c = inode->i_sb->s_fs_info;
|
||||
@ -483,7 +483,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
|
||||
folio_unlock(folio);
|
||||
folio_put(folio);
|
||||
|
||||
return write_begin_slow(mapping, pos, len, pagep);
|
||||
return write_begin_slow(mapping, pos, len, foliop);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -492,7 +492,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
|
||||
* with @ui->ui_mutex locked if we are appending pages, and unlocked
|
||||
* otherwise. This is an optimization (slightly hacky though).
|
||||
*/
|
||||
*pagep = &folio->page;
|
||||
*foliop = folio;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -524,9 +524,8 @@ static void cancel_budget(struct ubifs_info *c, struct folio *folio,
|
||||
|
||||
static int ubifs_write_end(struct file *file, struct address_space *mapping,
|
||||
loff_t pos, unsigned len, unsigned copied,
|
||||
struct page *page, void *fsdata)
|
||||
struct folio *folio, void *fsdata)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
struct inode *inode = mapping->host;
|
||||
struct ubifs_inode *ui = ubifs_inode(inode);
|
||||
struct ubifs_info *c = inode->i_sb->s_fs_info;
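The two ubifs hunks above capture the heart of the whole series: ->write_begin no longer hands back a page through *pagep but the locked, referenced folio itself through *foliop, and ->write_end receives that same folio back. A minimal sketch of that contract, assuming only the generic pagecache helpers (sketch_write_begin is a made-up name, not ubifs code):

#include <linux/pagemap.h>

static int sketch_write_begin(struct file *file,
			      struct address_space *mapping,
			      loff_t pos, unsigned len,
			      struct folio **foliop, void **fsdata)
{
	struct folio *folio;

	/* FGP_WRITEBEGIN = locked, created if absent, stable for write */
	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN, mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	*foliop = folio;	/* was: *pagep = &folio->page; */
	return 0;
}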

@ -62,7 +62,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(&folio->page, 0, end, udf_get_block);
	err = __block_write_begin(folio, 0, end, udf_get_block);
	if (err) {
		folio_unlock(folio);
		ret = vmf_fs_error(err);

@ -246,14 +246,14 @@ static void udf_readahead(struct readahead_control *rac)

static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata)
			   struct folio **foliop, void **fsdata)
{
	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
	struct folio *folio;
	int ret;

	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = block_write_begin(mapping, pos, len, pagep,
		ret = block_write_begin(mapping, pos, len, foliop,
					udf_get_block);
		if (unlikely(ret))
			udf_write_failed(mapping, pos + len);
@ -265,7 +265,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*pagep = &folio->page;
	*foliop = folio;
	if (!folio_test_uptodate(folio))
		udf_adinicb_read_folio(folio);
	return 0;
@ -273,16 +273,14 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,

static int udf_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
			 struct folio *folio, void *fsdata)
{
	struct inode *inode = file_inode(file);
	struct folio *folio;
	loff_t last_pos;

	if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
		return generic_write_end(file, mapping, pos, len, copied, page,
		return generic_write_end(file, mapping, pos, len, copied, folio,
					 fsdata);
	folio = page_folio(page);
	last_pos = pos + copied;
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

fs/ufs/dir.c
@ -42,18 +42,18 @@ static inline int ufs_match(struct super_block *sb, int len,
	return !memcmp(name, de->d_name, len);
}

static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	unlock_page(page);
	folio_unlock(folio);
}

static int ufs_handle_dirsync(struct inode *dir)
@ -66,22 +66,16 @@ static int ufs_handle_dirsync(struct inode *dir)
	return err;
}

static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}

ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;
	struct folio *folio;

	de = ufs_find_entry(dir, qstr, &page);
	de = ufs_find_entry(dir, qstr, &folio);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
		folio_release_kmap(folio, de);
	}
	return res;
}
@ -89,43 +83,40 @@ ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)

/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode,
		  struct folio *folio, struct inode *inode,
		  bool update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = ufs_prepare_chunk(page, pos, len);
	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, len);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	ufs_commit_chunk(page, pos, len);
	ufs_put_page(page);
	ufs_commit_chunk(folio, pos, len);
	folio_release_kmap(folio, de);
	if (update_times)
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	ufs_handle_dirsync(dir);
}
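The single-line replacement for the page_offset()/page_address() arithmetic above recurs in nearly every hunk that follows, so it is worth spelling out. offset_in_folio() masks the pointer with folio_size() - 1, so it is valid for addresses obtained from kmap_local_folio() as well as for physical folio addresses, and it copes with folios larger than one page. A sketch (file_pos_of() is a hypothetical helper, not part of the patch):

#include <linux/pagemap.h>

/*
 * Sketch only: compute the file position of a byte that lives inside
 * a (possibly multi-page) folio, given any pointer into its mapping.
 * The old page_offset(page) + (p - page_address(page)) form only
 * worked for single pages.
 */
static loff_t file_pos_of(struct folio *folio, void *p)
{
	return folio_pos(folio) + offset_in_folio(folio, p);
}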

static bool ufs_check_page(struct page *page)
static bool ufs_check_folio(struct folio *folio, char *kaddr)
{
	struct inode *dir = page->mapping->host;
	struct inode *dir = folio->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_SIZE;
	unsigned limit = folio_size(folio);
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
	if (dir->i_size < folio_pos(folio) + limit) {
		limit = offset_in_folio(folio, dir->i_size);
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
@ -150,13 +141,13 @@ static bool ufs_check_page(struct page *page)
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	folio_set_checked(folio);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
	ufs_error(sb, __func__,
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
@ -176,36 +167,40 @@ static bool ufs_check_page(struct page *page)
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		   "offset=%lu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
	ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
		  "offset=%llu, rec_len=%d, name_len=%d",
		  dir->i_ino, error, folio_pos(folio) + offs,
		  rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		  "entry in directory #%lu spans the page boundary"
		  "offset=%lu",
		  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
		  "offset=%llu",
		  dir->i_ino, folio_pos(folio) + offs);
fail:
	return false;
}

static struct page *ufs_get_page(struct inode *dir, unsigned long n)
static void *ufs_get_folio(struct inode *dir, unsigned long n,
			   struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (unlikely(!PageChecked(page))) {
			if (!ufs_check_page(page))
	struct folio *folio = read_mapping_folio(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	kaddr = kmap_local_folio(folio, 0);
	if (unlikely(!folio_test_checked(folio))) {
		if (!ufs_check_folio(folio, kaddr))
			goto fail;
	}
	}
	return page;
	*foliop = folio;
	return kaddr;

fail:
	ufs_put_page(page);
	folio_release_kmap(folio, kaddr);
	return ERR_PTR(-EIO);
}
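ufs_get_folio() above establishes the mapping discipline that the rest of the file leans on: every successful call returns a kmap_local address, and every consumer eventually hands an address from that mapping back through folio_release_kmap(), which undoes both the kmap_local and the folio reference in one call. A self-contained sketch of the pairing (first_byte() is illustrative only, and assumes the caller transferred its folio reference):

#include <linux/highmem.h>

static char first_byte(struct folio *folio)
{
	char *kaddr = kmap_local_folio(folio, 0);	/* map at offset 0 */
	char c = *kaddr;

	/* kunmap_local() on the address, then folio_put() on the folio */
	folio_release_kmap(folio, kaddr);
	return c;
}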

@ -231,17 +226,14 @@ ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
						   fs16_to_cpu(sb, p->d_reclen));
}

struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;
	struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop);

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
	if (!IS_ERR(de))
		return ufs_next_entry(dir->i_sb, de);

	return NULL;
}

/*
@ -253,7 +245,7 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct page **res_page)
				     struct folio **foliop)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
@ -261,7 +253,6 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

@ -270,27 +261,23 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
		char *kaddr = ufs_get_folio(dir, n, foliop);

		if (!IS_ERR(kaddr)) {
			de = (struct ufs_dir_entry *)kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
			folio_release_kmap(*foliop, kaddr);
		}
		if (++n >= npages)
			n = 0;
@ -299,7 +286,6 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}
@ -316,11 +302,10 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct folio *folio = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

@ -328,21 +313,19 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr = ufs_get_folio(dir, n, &folio);
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
@ -369,16 +352,15 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char*)de - (char*)page_address(page);
	err = ufs_prepare_chunk(page, pos, rec_len);
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	err = ufs_prepare_chunk(folio, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
@ -395,18 +377,17 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	ufs_commit_chunk(page, pos, rec_len);
	ufs_commit_chunk(folio, pos, rec_len);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	mark_inode_dirty(dir);
	err = ufs_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	ufs_put_page(page);
out:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	unlock_page(page);
	folio_unlock(folio);
	goto out_put;
}

@ -444,19 +425,18 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;
		struct folio *folio;
		char *kaddr = ufs_get_folio(inode, n, &folio);
		char *limit;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
		if (IS_ERR(kaddr)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
			return PTR_ERR(kaddr);
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
@ -482,13 +462,13 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
				       ufs_get_de_namlen(sb, de),
				       fs32_to_cpu(sb, de->d_ino),
				       d_type)) {
					ufs_put_page(page);
					folio_release_kmap(folio, de);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
@ -499,19 +479,23 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page * page)
		     struct folio *folio)
{
	struct super_block *sb = inode->i_sb;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	size_t from, to;
	char *kaddr;
	loff_t pos;
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	struct ufs_dir_entry *de, *pde = NULL;
	int err;

	UFSD("ENTER\n");

	from = offset_in_folio(folio, dir);
	to = from + fs16_to_cpu(sb, dir->d_reclen);
	kaddr = (char *)dir - from;
	from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	de = (struct ufs_dir_entry *) (kaddr + from);

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	     fs32_to_cpu(sb, de->d_ino),
	     fs16_to_cpu(sb, de->d_reclen),
@ -528,21 +512,20 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);

	pos = page_offset(page) + from;
	lock_page(page);
	err = ufs_prepare_chunk(page, pos, to - from);
		from = offset_in_folio(folio, pde);
	pos = folio_pos(folio) + from;
	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	ufs_commit_chunk(page, pos, to - from);
	ufs_commit_chunk(folio, pos, to - from);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	err = ufs_handle_dirsync(inode);
out:
	ufs_put_page(page);
	folio_release_kmap(folio, kaddr);
	UFSD("EXIT\n");
	return err;
}
@ -551,26 +534,25 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct folio *folio = filemap_grab_folio(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;
	char *kaddr;

	if (!page)
		return -ENOMEM;
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = ufs_prepare_chunk(page, 0, chunk_size);
	err = ufs_prepare_chunk(folio, 0, chunk_size);
	if (err) {
		unlock_page(page);
		folio_unlock(folio);
		goto fail;
	}

	kmap(page);
	base = (char*)page_address(page);
	memset(base, 0, PAGE_SIZE);
	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, folio_size(folio));

	de = (struct ufs_dir_entry *) base;
	de = (struct ufs_dir_entry *)kaddr;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
@ -584,12 +566,12 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap(page);
	kunmap_local(kaddr);

	ufs_commit_chunk(page, 0, chunk_size);
	ufs_commit_chunk(folio, 0, chunk_size);
	err = ufs_handle_dirsync(inode);
fail:
	put_page(page);
	folio_put(folio);
	return err;
}

@ -599,18 +581,17 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	struct folio *folio;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
		kaddr = ufs_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

@ -637,12 +618,12 @@ int ufs_empty_dir(struct inode * inode)
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	folio_release_kmap(folio, kaddr);
	return 0;
}

@ -479,9 +479,9 @@ static int ufs_read_folio(struct file *file, struct folio *folio)
	return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
	return __block_write_begin(folio, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);
@ -498,11 +498,11 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to)

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata)
			   struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
	ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

@ -511,11 +511,11 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping,

static int ufs_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
			 struct folio *folio, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;

@ -209,14 +209,14 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode * inode = d_inode(dentry);
	struct ufs_dir_entry *de;
	struct page *page;
	struct folio *folio;
	int err = -ENOENT;

	de = ufs_find_entry(dir, &dentry->d_name, &page);
	de = ufs_find_entry(dir, &dentry->d_name, &folio);
	if (!de)
		goto out;

	err = ufs_delete_entry(dir, de, page);
	err = ufs_delete_entry(dir, de, folio);
	if (err)
		goto out;

@ -249,28 +249,28 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct page *dir_page = NULL;
	struct folio *dir_folio = NULL;
	struct ufs_dir_entry * dir_de = NULL;
	struct page *old_page;
	struct folio *old_folio;
	struct ufs_dir_entry *old_de;
	int err = -ENOENT;

	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = ufs_dotdot(old_inode, &dir_page);
		dir_de = ufs_dotdot(old_inode, &dir_folio);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct folio *new_folio;
		struct ufs_dir_entry *new_de;

		err = -ENOTEMPTY;
@ -278,10 +278,10 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
			goto out_dir;

		err = -ENOENT;
		new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
		new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
		if (!new_de)
			goto out_dir;
		ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
		ufs_set_link(new_dir, new_de, new_folio, old_inode, 1);
		inode_set_ctime_current(new_inode);
		if (dir_de)
			drop_nlink(new_inode);
@ -300,29 +300,24 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
	 */
	inode_set_ctime_current(old_inode);

	ufs_delete_entry(old_dir, old_de, old_page);
	ufs_delete_entry(old_dir, old_de, old_folio);
	mark_inode_dirty(old_inode);

	if (dir_de) {
		if (old_dir != new_dir)
			ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
		else {
			kunmap(dir_page);
			put_page(dir_page);
		}
			ufs_set_link(old_inode, dir_de, dir_folio, new_dir, 0);
		else
			folio_release_kmap(dir_folio, new_dir);
		inode_dec_link_count(old_dir);
	}
	return 0;


out_dir:
	if (dir_de) {
		kunmap(dir_page);
		put_page(dir_page);
	}
	if (dir_de)
		folio_release_kmap(dir_folio, dir_de);
out_old:
	kunmap(old_page);
	put_page(old_page);
	folio_release_kmap(old_folio, old_de);
out:
	return err;
}

fs/ufs/ufs.h
@ -99,15 +99,17 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);

/* dir.c */
extern const struct inode_operations ufs_dir_inode_operations;
extern int ufs_add_link (struct dentry *, struct inode *);
extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
extern int ufs_make_empty(struct inode *, struct inode *);
extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
extern int ufs_empty_dir (struct inode *);
extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
			 struct page *page, struct inode *inode, bool update_times);

int ufs_add_link(struct dentry *, struct inode *);
ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
int ufs_make_empty(struct inode *, struct inode *);
struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *,
				     struct folio **);
int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct folio *);
int ufs_empty_dir(struct inode *);
struct ufs_dir_entry *ufs_dotdot(struct inode *, struct folio **);
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct folio *folio, struct inode *inode, bool update_times);

/* file.c */
extern const struct inode_operations ufs_file_inode_operations;

@ -250,9 +250,9 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
	}
}

extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);
dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers

@ -300,23 +300,23 @@ static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)

static int vboxsf_write_end(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int copied,
			    struct page *page, void *fsdata)
			    struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct vboxsf_handle *sf_handle = file->private_data;
	unsigned int from = pos & ~PAGE_MASK;
	size_t from = offset_in_folio(folio, pos);
	u32 nwritten = len;
	u8 *buf;
	int err;

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page) && copied < len)
		zero_user(page, from + copied, len - copied);
	/* zero the stale part of the folio if we did a short copy */
	if (!folio_test_uptodate(folio) && copied < len)
		folio_zero_range(folio, from + copied, len - copied);

	buf = kmap(page);
	buf = kmap(&folio->page);
	err = vboxsf_write(sf_handle->root, sf_handle->handle,
			   pos, &nwritten, buf + from);
	kunmap(page);
	kunmap(&folio->page);

	if (err) {
		nwritten = 0;
@ -326,16 +326,16 @@ static int vboxsf_write_end(struct file *file, struct address_space *mapping,
	/* mtime changed */
	VBOXSF_I(inode)->force_restat = 1;

	if (!PageUptodate(page) && nwritten == PAGE_SIZE)
		SetPageUptodate(page);
	if (!folio_test_uptodate(folio) && nwritten == folio_size(folio))
		folio_mark_uptodate(folio);

	pos += nwritten;
	if (pos > inode->i_size)
		i_size_write(inode, pos);

out:
	unlock_page(page);
	put_page(page);
	folio_unlock(folio);
	folio_put(folio);

	return nwritten;
}
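The short-copy handling above translates zero_user()/SetPageUptodate() into their folio equivalents. A condensed sketch of just that rule, outside of vboxsf (zero_short_copy() is a hypothetical helper, not kernel API): if the folio never went through a read and the caller copied fewer than len bytes, the stale tail must be zeroed before anything is written back.

#include <linux/highmem.h>
#include <linux/pagemap.h>

static void zero_short_copy(struct folio *folio, size_t from,
			    unsigned int len, unsigned int copied)
{
	/* folio not uptodate + short copy => tail bytes are garbage */
	if (!folio_test_uptodate(folio) && copied < len)
		folio_zero_range(folio, from + copied, len - copied);
}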

@ -343,7 +343,7 @@ static int vboxsf_write_end(struct file *file, struct address_space *mapping,
/*
 * Note simple_write_begin does not read the page from disk on partial writes
 * this is ok since vboxsf_write_end only writes the written parts of the
 * page and it does not call SetPageUptodate for partial writes.
 * page and it does not call folio_mark_uptodate for partial writes.
 */
const struct address_space_operations vboxsf_reg_aops = {
	.read_folio = vboxsf_read_folio,

@ -257,18 +257,18 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		struct folio **foliop, get_block_t *get_block);
int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		loff_t, unsigned, unsigned,
		struct page *, void *);
		loff_t, unsigned len, unsigned copied,
		struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
		loff_t, unsigned, unsigned,
		struct page *, void *);
		loff_t, unsigned len, unsigned copied,
		struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		unsigned, struct page **, void **,
		unsigned, struct folio **, void **,
		get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
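With block_write_begin() and generic_write_end() taking struct folio, a buffer-head based filesystem's write path reduces to the pattern below — a minimal sketch modeled on the ufs hunks earlier in this series; the myfs_* names and myfs_get_block() are stand-ins for illustration, not a real API:

#include <linux/buffer_head.h>

/* Assumed to exist elsewhere: the filesystem's block-mapping callback. */
extern int myfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	/* Hands back a locked folio through *foliop on success. */
	return block_write_begin(mapping, pos, len, foliop, myfs_get_block);
}

static int myfs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	/* Unlocks and releases the folio; no page_folio() hop remains. */
	return generic_write_end(file, mapping, pos, len, copied, folio,
				 fsdata);
}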

@ -408,10 +408,10 @@ struct address_space_operations {

	int (*write_begin)(struct file *, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata);
			   struct folio **foliop, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata);
			 struct folio *folio, void *fsdata);

	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
	sector_t (*bmap)(struct address_space *, sector_t);
@ -3363,7 +3363,7 @@ extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata);
			struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);

@ -3987,7 +3987,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
	ssize_t written = 0;

	do {
		struct page *page;
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
@ -4017,11 +4016,10 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
		}

		status = a_ops->write_begin(file, mapping, pos, bytes,
					    &page, &fsdata);
					    &folio, &fsdata);
		if (unlikely(status < 0))
			break;

		folio = page_folio(page);
		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;
@ -4033,7 +4031,7 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
		flush_dcache_folio(folio);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
					  folio, fsdata);
		if (unlikely(status != copied)) {
			iov_iter_revert(i, copied - max(status, 0L));
			if (unlikely(status < 0))
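The mm/filemap.c hunk shows the caller side of the new contract. A condensed, illustrative paraphrase of the loop follows (error paths, fault-in, and writeback balancing elided; this is not the actual generic_perform_write()): ->write_begin() now returns the locked folio directly, so the page_folio() hop is gone and multi-page folios fall out naturally from offset_in_folio()/folio_size().

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/highmem.h>

static ssize_t perform_write_sketch(struct file *file, struct iov_iter *i,
				    loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;

	do {
		struct folio *folio;
		void *fsdata = NULL;
		size_t offset, copied;
		size_t bytes = iov_iter_count(i);
		int status;

		status = a_ops->write_begin(file, mapping, pos, bytes,
					    &folio, &fsdata);
		if (status < 0)
			return written ? written : status;

		/* clamp the copy to the folio the fs handed back */
		offset = offset_in_folio(folio, pos);
		bytes = min(bytes, folio_size(folio) - offset);
		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		flush_dcache_folio(folio);

		/* write_end consumes the folio: unlock + put */
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
					  folio, fsdata);
		if (status <= 0)
			break;
		pos += status;
		written += status;
	} while (iov_iter_count(i));

	return written;
}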

mm/shmem.c
@ -2878,7 +2878,7 @@ static const struct inode_operations shmem_short_symlink_operations;
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len,
		  struct page **pagep, void **fsdata)
		  struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
@ -2899,23 +2899,22 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
	if (ret)
		return ret;

	*pagep = folio_file_page(folio, index);
	if (PageHWPoison(*pagep)) {
	if (folio_test_hwpoison(folio) ||
	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
		folio_unlock(folio);
		folio_put(folio);
		*pagep = NULL;
		return -EIO;
	}

	*foliop = folio;
	return 0;
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
		struct folio *folio, void *fsdata)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)