Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Major changes for 3.14 include support for the newly added ZERO_RANGE
  and COLLAPSE_RANGE fallocate operations, and scalability improvements
  in the jbd2 layer and in xattr handling when the extended attributes
  spill over into an external block.

  Other than that, the usual clean ups and minor bug fixes"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (42 commits)
  ext4: fix premature freeing of partial clusters split across leaf blocks
  ext4: remove unneeded test of ret variable
  ext4: fix comment typo
  ext4: make ext4_block_zero_page_range static
  ext4: atomically set inode->i_flags in ext4_set_inode_flags()
  ext4: optimize Hurd tests when reading/writing inodes
  ext4: kill i_version support for Hurd-castrated file systems
  ext4: each filesystem creates and uses its own mb_cache
  fs/mbcache.c: doucple the locking of local from global data
  fs/mbcache.c: change block and index hash chain to hlist_bl_node
  ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate
  ext4: refactor ext4_fallocate code
  ext4: Update inode i_size after the preallocation
  ext4: fix partial cluster handling for bigalloc file systems
  ext4: delete path dealloc code in ext4_ext_handle_uninitialized_extents
  ext4: only call sync_filesystm() when remounting read-only
  fs: push sync_filesystem() down to the file system's remount_fs()
  jbd2: improve error messages for inconsistent journal heads
  jbd2: minimize region locked by j_list_lock in jbd2_journal_forget()
  jbd2: minimize region locked by j_list_lock in journal_get_create_access()
  ...
commit 24e7ea3bea
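As context for the ZERO_RANGE and COLLAPSE_RANGE support pulled in above, the sketch below shows how a userspace program might exercise the two new fallocate(2) operations. It is only an illustration, not part of this commit: the file path, offsets and lengths are made up, and it assumes a kernel and C library that already expose FALLOC_FL_ZERO_RANGE and FALLOC_FL_COLLAPSE_RANGE in <linux/falloc.h>. COLLAPSE_RANGE additionally requires the offset and length to be multiples of the filesystem block size.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; any regular file on an ext4 filesystem works. */
	int fd = open("testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Zero 1 MiB starting at offset 4 KiB without deallocating blocks. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 1024 * 1024))
		perror("FALLOC_FL_ZERO_RANGE");

	/* Remove 64 KiB from the middle of the file; the tail shifts down. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 65536, 65536))
		perror("FALLOC_FL_COLLAPSE_RANGE");

	close(fd);
	return 0;
}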
@@ -212,6 +212,7 @@ static int parse_options(struct super_block *sb, char *options)

static int adfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;
	return parse_options(sb, data);
}
@@ -530,6 +530,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)

	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);

	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;

	memcpy(volume, sbi->s_volume, 32);
@@ -913,6 +913,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)

static int
befs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	if (!(*flags & MS_RDONLY))
		return -EINVAL;
	return 0;
@@ -1380,6 +1380,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
	int ret;

	sync_filesystem(sb);
	btrfs_remount_prepare(fs_info);

	ret = btrfs_parse_options(root, data);
@@ -541,6 +541,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;
	return 0;
}
@@ -96,6 +96,7 @@ void coda_destroy_inodecache(void)

static int coda_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_NOATIME;
	return 0;
}
@@ -243,6 +243,7 @@ static void cramfs_kill_sb(struct super_block *sb)

static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_RDONLY;
	return 0;
}
@@ -218,6 +218,7 @@ static int debugfs_remount(struct super_block *sb, int *flags, char *data)
	int err;
	struct debugfs_fs_info *fsi = sb->s_fs_info;

	sync_filesystem(sb);
	err = debugfs_parse_options(data, &fsi->mount_opts);
	if (err)
		goto fail;
@@ -313,6 +313,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
	struct pts_fs_info *fsi = DEVPTS_SB(sb);
	struct pts_mount_opts *opts = &fsi->mount_opts;

	sync_filesystem(sb);
	err = parse_mount_options(data, PARSE_REMOUNT, opts);

	/*
@@ -114,6 +114,7 @@ static void destroy_inodecache(void)

static int efs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_RDONLY;
	return 0;
}
@@ -1254,6 +1254,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
	unsigned long old_sb_flags;
	int err;

	sync_filesystem(sb);
	spin_lock(&sbi->s_lock);

	/* Store the old options */
@@ -2649,6 +2649,8 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
	int i;
#endif

	sync_filesystem(sb);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
@@ -31,6 +31,7 @@
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
#include <crypto/hash.h>
#include <linux/falloc.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
@@ -567,6 +568,8 @@ enum {
#define EXT4_GET_BLOCKS_NO_LOCK			0x0100
	/* Do not put hole in extent cache */
#define EXT4_GET_BLOCKS_NO_PUT_HOLE		0x0200
	/* Convert written extents to unwritten */
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN	0x0400

/*
 * The bit position of these flags must not overlap with any of the
@@ -998,6 +1001,8 @@ struct ext4_inode_info {
#define EXT4_MOUNT2_STD_GROUP_SIZE	0x00000002 /* We have standard group
						      size of blocksize * 8
						      blocks */
#define EXT4_MOUNT2_HURD_COMPAT		0x00000004 /* Support HURD-castrated
						      file systems */

#define clear_opt(sb, opt)	EXT4_SB(sb)->s_mount_opt &= \
					~EXT4_MOUNT_##opt
@@ -1326,6 +1331,7 @@ struct ext4_sb_info {
	struct list_head s_es_lru;
	unsigned long s_es_last_sorted;
	struct percpu_counter s_extent_cache_cnt;
	struct mb_cache *s_mb_cache;
	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;

	/* Ratelimit ext4 messages. */
@@ -2133,8 +2139,6 @@ extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from);
extern int ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
			     loff_t lstart, loff_t lend);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2757,6 +2761,7 @@ extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);
extern int ext4_ext_precache(struct inode *inode);
extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);

/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -2766,6 +2771,8 @@ extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
			     __u64 start_orig, __u64 start_donor,
			     __u64 len, __u64 *moved_len);
extern int mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
			    struct ext4_extent **extent);

/* page-io.c */
extern int __init ext4_init_pageio(void);
@@ -259,6 +259,16 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
	if (WARN_ON_ONCE(err)) {
		ext4_journal_abort_handle(where, line, __func__, bh,
					  handle, err);
		if (inode == NULL) {
			pr_err("EXT4: jbd2_journal_dirty_metadata "
			       "failed: handle type %u started at "
			       "line %u, credits %u/%u, errcode %d",
			       handle->h_type,
			       handle->h_line_no,
			       handle->h_requested_credits,
			       handle->h_buffer_credits, err);
			return err;
		}
		ext4_error_inode(inode, where, line,
				 bh->b_blocknr,
				 "journal_dirty_metadata failed: "
File diff suppressed because it is too large.
@ -184,7 +184,7 @@ static void ext4_es_print_tree(struct inode *inode)
|
||||
while (node) {
|
||||
struct extent_status *es;
|
||||
es = rb_entry(node, struct extent_status, rb_node);
|
||||
printk(KERN_DEBUG " [%u/%u) %llu %llx",
|
||||
printk(KERN_DEBUG " [%u/%u) %llu %x",
|
||||
es->es_lblk, es->es_len,
|
||||
ext4_es_pblock(es), ext4_es_status(es));
|
||||
node = rb_next(node);
|
||||
@ -445,8 +445,8 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
|
||||
pr_warn("ES insert assertion failed for "
|
||||
"inode: %lu we can find an extent "
|
||||
"at block [%d/%d/%llu/%c], but we "
|
||||
"want to add an delayed/hole extent "
|
||||
"[%d/%d/%llu/%llx]\n",
|
||||
"want to add a delayed/hole extent "
|
||||
"[%d/%d/%llu/%x]\n",
|
||||
inode->i_ino, ee_block, ee_len,
|
||||
ee_start, ee_status ? 'u' : 'w',
|
||||
es->es_lblk, es->es_len,
|
||||
@ -486,8 +486,8 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
|
||||
if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
|
||||
pr_warn("ES insert assertion failed for inode: %lu "
|
||||
"can't find an extent at block %d but we want "
|
||||
"to add an written/unwritten extent "
|
||||
"[%d/%d/%llu/%llx]\n", inode->i_ino,
|
||||
"to add a written/unwritten extent "
|
||||
"[%d/%d/%llu/%x]\n", inode->i_ino,
|
||||
es->es_lblk, es->es_lblk, es->es_len,
|
||||
ext4_es_pblock(es), ext4_es_status(es));
|
||||
}
|
||||
@ -524,7 +524,7 @@ static void ext4_es_insert_extent_ind_check(struct inode *inode,
|
||||
*/
|
||||
pr_warn("ES insert assertion failed for inode: %lu "
|
||||
"We can find blocks but we want to add a "
|
||||
"delayed/hole extent [%d/%d/%llu/%llx]\n",
|
||||
"delayed/hole extent [%d/%d/%llu/%x]\n",
|
||||
inode->i_ino, es->es_lblk, es->es_len,
|
||||
ext4_es_pblock(es), ext4_es_status(es));
|
||||
return;
|
||||
@ -554,7 +554,7 @@ static void ext4_es_insert_extent_ind_check(struct inode *inode,
|
||||
if (ext4_es_is_written(es)) {
|
||||
pr_warn("ES insert assertion failed for inode: %lu "
|
||||
"We can't find the block but we want to add "
|
||||
"an written extent [%d/%d/%llu/%llx]\n",
|
||||
"a written extent [%d/%d/%llu/%x]\n",
|
||||
inode->i_ino, es->es_lblk, es->es_len,
|
||||
ext4_es_pblock(es), ext4_es_status(es));
|
||||
return;
|
||||
@ -658,8 +658,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
|
||||
|
||||
newes.es_lblk = lblk;
|
||||
newes.es_len = len;
|
||||
ext4_es_store_pblock(&newes, pblk);
|
||||
ext4_es_store_status(&newes, status);
|
||||
ext4_es_store_pblock_status(&newes, pblk, status);
|
||||
trace_ext4_es_insert_extent(inode, &newes);
|
||||
|
||||
ext4_es_insert_extent_check(inode, &newes);
|
||||
@ -699,8 +698,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
|
||||
|
||||
newes.es_lblk = lblk;
|
||||
newes.es_len = len;
|
||||
ext4_es_store_pblock(&newes, pblk);
|
||||
ext4_es_store_status(&newes, status);
|
||||
ext4_es_store_pblock_status(&newes, pblk, status);
|
||||
trace_ext4_es_cache_extent(inode, &newes);
|
||||
|
||||
if (!len)
|
||||
@ -812,13 +810,13 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
|
||||
|
||||
newes.es_lblk = end + 1;
|
||||
newes.es_len = len2;
|
||||
block = 0x7FDEADBEEF;
|
||||
if (ext4_es_is_written(&orig_es) ||
|
||||
ext4_es_is_unwritten(&orig_es)) {
|
||||
ext4_es_is_unwritten(&orig_es))
|
||||
block = ext4_es_pblock(&orig_es) +
|
||||
orig_es.es_len - len2;
|
||||
ext4_es_store_pblock(&newes, block);
|
||||
}
|
||||
ext4_es_store_status(&newes, ext4_es_status(&orig_es));
|
||||
ext4_es_store_pblock_status(&newes, block,
|
||||
ext4_es_status(&orig_es));
|
||||
err = __es_insert_extent(inode, &newes);
|
||||
if (err) {
|
||||
es->es_lblk = orig_es.es_lblk;
|
||||
|
@ -129,6 +129,15 @@ static inline void ext4_es_store_status(struct extent_status *es,
|
||||
(es->es_pblk & ~ES_MASK));
|
||||
}
|
||||
|
||||
static inline void ext4_es_store_pblock_status(struct extent_status *es,
|
||||
ext4_fsblk_t pb,
|
||||
unsigned int status)
|
||||
{
|
||||
es->es_pblk = (((ext4_fsblk_t)
|
||||
(status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
|
||||
(pb & ~ES_MASK));
|
||||
}
|
||||
|
||||
extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi);
|
||||
extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
|
||||
extern void ext4_es_lru_add(struct inode *inode);
|
||||
|
fs/ext4/inode.c (120 changed lines)
@ -504,6 +504,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
{
|
||||
struct extent_status es;
|
||||
int retval;
|
||||
int ret = 0;
|
||||
#ifdef ES_AGGRESSIVE_TEST
|
||||
struct ext4_map_blocks orig_map;
|
||||
|
||||
@ -515,6 +516,12 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
"logical block %lu\n", inode->i_ino, flags, map->m_len,
|
||||
(unsigned long) map->m_lblk);
|
||||
|
||||
/*
|
||||
* ext4_map_blocks returns an int, and m_len is an unsigned int
|
||||
*/
|
||||
if (unlikely(map->m_len > INT_MAX))
|
||||
map->m_len = INT_MAX;
|
||||
|
||||
/* Lookup extent status tree firstly */
|
||||
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
|
||||
ext4_es_lru_add(inode);
|
||||
@ -553,7 +560,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
EXT4_GET_BLOCKS_KEEP_SIZE);
|
||||
}
|
||||
if (retval > 0) {
|
||||
int ret;
|
||||
unsigned int status;
|
||||
|
||||
if (unlikely(retval != map->m_len)) {
|
||||
@ -580,7 +586,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
|
||||
found:
|
||||
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
|
||||
int ret = check_block_validity(inode, map);
|
||||
ret = check_block_validity(inode, map);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
@ -597,7 +603,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
* with buffer head unmapped.
|
||||
*/
|
||||
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
|
||||
return retval;
|
||||
/*
|
||||
* If we need to convert extent to unwritten
|
||||
* we continue and do the actual work in
|
||||
* ext4_ext_map_blocks()
|
||||
*/
|
||||
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
|
||||
return retval;
|
||||
|
||||
/*
|
||||
* Here we clear m_flags because after allocating an new extent,
|
||||
@ -653,7 +665,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
|
||||
|
||||
if (retval > 0) {
|
||||
int ret;
|
||||
unsigned int status;
|
||||
|
||||
if (unlikely(retval != map->m_len)) {
|
||||
@ -688,7 +699,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
has_zeroout:
|
||||
up_write((&EXT4_I(inode)->i_data_sem));
|
||||
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
|
||||
int ret = check_block_validity(inode, map);
|
||||
ret = check_block_validity(inode, map);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
@ -3312,26 +3323,6 @@ void ext4_set_aops(struct inode *inode)
|
||||
inode->i_mapping->a_ops = &ext4_aops;
|
||||
}
|
||||
|
||||
/*
|
||||
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
|
||||
* up to the end of the block which corresponds to `from'.
|
||||
* This required during truncate. We need to physically zero the tail end
|
||||
* of that block so it doesn't yield old data if the file is later grown.
|
||||
*/
|
||||
int ext4_block_truncate_page(handle_t *handle,
|
||||
struct address_space *mapping, loff_t from)
|
||||
{
|
||||
unsigned offset = from & (PAGE_CACHE_SIZE-1);
|
||||
unsigned length;
|
||||
unsigned blocksize;
|
||||
struct inode *inode = mapping->host;
|
||||
|
||||
blocksize = inode->i_sb->s_blocksize;
|
||||
length = blocksize - (offset & (blocksize - 1));
|
||||
|
||||
return ext4_block_zero_page_range(handle, mapping, from, length);
|
||||
}
|
||||
|
||||
/*
|
||||
* ext4_block_zero_page_range() zeros out a mapping of length 'length'
|
||||
* starting from file offset 'from'. The range to be zero'd must
|
||||
@ -3339,7 +3330,7 @@ int ext4_block_truncate_page(handle_t *handle,
|
||||
* the end of the block it will be shortened to end of the block
|
||||
* that cooresponds to 'from'
|
||||
*/
|
||||
int ext4_block_zero_page_range(handle_t *handle,
|
||||
static int ext4_block_zero_page_range(handle_t *handle,
|
||||
struct address_space *mapping, loff_t from, loff_t length)
|
||||
{
|
||||
ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
|
||||
@ -3429,6 +3420,26 @@ int ext4_block_zero_page_range(handle_t *handle,
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
|
||||
* up to the end of the block which corresponds to `from'.
|
||||
* This required during truncate. We need to physically zero the tail end
|
||||
* of that block so it doesn't yield old data if the file is later grown.
|
||||
*/
|
||||
int ext4_block_truncate_page(handle_t *handle,
|
||||
struct address_space *mapping, loff_t from)
|
||||
{
|
||||
unsigned offset = from & (PAGE_CACHE_SIZE-1);
|
||||
unsigned length;
|
||||
unsigned blocksize;
|
||||
struct inode *inode = mapping->host;
|
||||
|
||||
blocksize = inode->i_sb->s_blocksize;
|
||||
length = blocksize - (offset & (blocksize - 1));
|
||||
|
||||
return ext4_block_zero_page_range(handle, mapping, from, length);
|
||||
}
|
||||
|
||||
int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
|
||||
loff_t lstart, loff_t length)
|
||||
{
|
||||
@ -3502,7 +3513,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
trace_ext4_punch_hole(inode, offset, length);
|
||||
trace_ext4_punch_hole(inode, offset, length, 0);
|
||||
|
||||
/*
|
||||
* Write out all dirty pages to avoid race conditions
|
||||
@ -3609,6 +3620,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
|
||||
up_write(&EXT4_I(inode)->i_data_sem);
|
||||
if (IS_SYNC(inode))
|
||||
ext4_handle_sync(handle);
|
||||
|
||||
/* Now release the pages again to reduce race window */
|
||||
if (last_block_offset > first_block_offset)
|
||||
truncate_pagecache_range(inode, first_block_offset,
|
||||
last_block_offset);
|
||||
|
||||
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
|
||||
ext4_mark_inode_dirty(handle, inode);
|
||||
out_stop:
|
||||
@ -3682,7 +3699,7 @@ void ext4_truncate(struct inode *inode)
|
||||
|
||||
/*
|
||||
* There is a possibility that we're either freeing the inode
|
||||
* or it completely new indode. In those cases we might not
|
||||
* or it's a completely new inode. In those cases we might not
|
||||
* have i_mutex locked because it's not necessary.
|
||||
*/
|
||||
if (!(inode->i_state & (I_NEW|I_FREEING)))
|
||||
@ -3934,8 +3951,8 @@ void ext4_set_inode_flags(struct inode *inode)
|
||||
new_fl |= S_NOATIME;
|
||||
if (flags & EXT4_DIRSYNC_FL)
|
||||
new_fl |= S_DIRSYNC;
|
||||
set_mask_bits(&inode->i_flags,
|
||||
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
|
||||
inode_set_flags(inode, new_fl,
|
||||
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
|
||||
}
|
||||
|
||||
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
|
||||
@ -4154,11 +4171,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
||||
EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
|
||||
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
|
||||
|
||||
inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
|
||||
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
|
||||
inode->i_version |=
|
||||
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
|
||||
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
|
||||
inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
|
||||
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
|
||||
inode->i_version |=
|
||||
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
@ -4328,8 +4347,7 @@ static int ext4_do_update_inode(handle_t *handle,
|
||||
goto out_brelse;
|
||||
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
|
||||
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
|
||||
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
||||
cpu_to_le32(EXT4_OS_HURD))
|
||||
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
|
||||
raw_inode->i_file_acl_high =
|
||||
cpu_to_le16(ei->i_file_acl >> 32);
|
||||
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
|
||||
@ -4374,12 +4392,15 @@ static int ext4_do_update_inode(handle_t *handle,
|
||||
raw_inode->i_block[block] = ei->i_data[block];
|
||||
}
|
||||
|
||||
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
|
||||
if (ei->i_extra_isize) {
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
|
||||
raw_inode->i_version_hi =
|
||||
cpu_to_le32(inode->i_version >> 32);
|
||||
raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
|
||||
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
|
||||
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
|
||||
if (ei->i_extra_isize) {
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
|
||||
raw_inode->i_version_hi =
|
||||
cpu_to_le32(inode->i_version >> 32);
|
||||
raw_inode->i_extra_isize =
|
||||
cpu_to_le16(ei->i_extra_isize);
|
||||
}
|
||||
}
|
||||
|
||||
ext4_inode_csum_set(inode, raw_inode, ei);
|
||||
@ -4446,7 +4467,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (wbc->sync_mode != WB_SYNC_ALL)
|
||||
/*
|
||||
* No need to force transaction in WB_SYNC_NONE mode. Also
|
||||
* ext4_sync_fs() will force the commit after everything is
|
||||
* written.
|
||||
*/
|
||||
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
|
||||
return 0;
|
||||
|
||||
err = ext4_force_commit(inode->i_sb);
|
||||
@ -4456,7 +4482,11 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
|
||||
err = __ext4_get_inode_loc(inode, &iloc, 0);
|
||||
if (err)
|
||||
return err;
|
||||
if (wbc->sync_mode == WB_SYNC_ALL)
|
||||
/*
|
||||
* sync(2) will flush the whole buffer cache. No need to do
|
||||
* it here separately for each inode.
|
||||
*/
|
||||
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
|
||||
sync_dirty_buffer(iloc.bh);
|
||||
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
|
||||
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
|
||||
|
@ -104,21 +104,15 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
||||
struct ext4_inode_info *ei_bl;
|
||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||
|
||||
if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode)) {
|
||||
err = -EINVAL;
|
||||
goto swap_boot_out;
|
||||
}
|
||||
if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
|
||||
return -EINVAL;
|
||||
|
||||
if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
|
||||
err = -EPERM;
|
||||
goto swap_boot_out;
|
||||
}
|
||||
if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
|
||||
if (IS_ERR(inode_bl)) {
|
||||
err = PTR_ERR(inode_bl);
|
||||
goto swap_boot_out;
|
||||
}
|
||||
if (IS_ERR(inode_bl))
|
||||
return PTR_ERR(inode_bl);
|
||||
ei_bl = EXT4_I(inode_bl);
|
||||
|
||||
filemap_flush(inode->i_mapping);
|
||||
@ -193,20 +187,14 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
||||
ext4_mark_inode_dirty(handle, inode);
|
||||
}
|
||||
}
|
||||
|
||||
ext4_journal_stop(handle);
|
||||
|
||||
ext4_double_up_write_data_sem(inode, inode_bl);
|
||||
|
||||
journal_err_out:
|
||||
ext4_inode_resume_unlocked_dio(inode);
|
||||
ext4_inode_resume_unlocked_dio(inode_bl);
|
||||
|
||||
unlock_two_nondirectories(inode, inode_bl);
|
||||
|
||||
iput(inode_bl);
|
||||
|
||||
swap_boot_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1808,6 +1808,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
|
||||
ext4_lock_group(ac->ac_sb, group);
|
||||
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
|
||||
ac->ac_g_ex.fe_len, &ex);
|
||||
ex.fe_logical = 0xDEADFA11; /* debug value */
|
||||
|
||||
if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
|
||||
ext4_fsblk_t start;
|
||||
@ -1936,7 +1937,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
ex.fe_logical = 0xDEADC0DE; /* debug value */
|
||||
ext4_mb_measure_extent(ac, &ex, e4b);
|
||||
|
||||
i += ex.fe_len;
|
||||
@ -1977,6 +1978,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
|
||||
max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
|
||||
if (max >= sbi->s_stripe) {
|
||||
ac->ac_found++;
|
||||
ex.fe_logical = 0xDEADF00D; /* debug value */
|
||||
ac->ac_b_ex = ex;
|
||||
ext4_mb_use_best_found(ac, e4b);
|
||||
break;
|
||||
@ -4006,8 +4008,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
|
||||
(unsigned long)ac->ac_b_ex.fe_len,
|
||||
(unsigned long)ac->ac_b_ex.fe_logical,
|
||||
(int)ac->ac_criteria);
|
||||
ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
|
||||
ac->ac_ex_scanned, ac->ac_found);
|
||||
ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
|
||||
ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
|
||||
ngroups = ext4_get_groups_count(sb);
|
||||
for (i = 0; i < ngroups; i++) {
|
||||
|
@ -48,7 +48,7 @@ extern ushort ext4_mballoc_debug;
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
#define mb_debug(n, fmt, a...)
|
||||
#define mb_debug(n, fmt, a...) no_printk(fmt, ## a)
|
||||
#endif
|
||||
|
||||
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
|
||||
@ -175,8 +175,6 @@ struct ext4_allocation_context {
|
||||
/* copy of the best found extent taken before preallocation efforts */
|
||||
struct ext4_free_extent ac_f_ex;
|
||||
|
||||
/* number of iterations done. we have to track to limit searching */
|
||||
unsigned long ac_ex_scanned;
|
||||
__u16 ac_groups_scanned;
|
||||
__u16 ac_found;
|
||||
__u16 ac_tail;
|
||||
|
@ -76,7 +76,7 @@ copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
|
||||
* ext4_ext_path structure refers to the last extent, or a negative error
|
||||
* value on failure.
|
||||
*/
|
||||
static int
|
||||
int
|
||||
mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
|
||||
struct ext4_extent **extent)
|
||||
{
|
||||
@ -861,8 +861,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
|
||||
}
|
||||
if (!buffer_mapped(bh)) {
|
||||
zero_user(page, block_start, blocksize);
|
||||
if (!err)
|
||||
set_buffer_uptodate(bh);
|
||||
set_buffer_uptodate(bh);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -59,6 +59,7 @@ static struct kset *ext4_kset;
|
||||
static struct ext4_lazy_init *ext4_li_info;
|
||||
static struct mutex ext4_li_mtx;
|
||||
static struct ext4_features *ext4_feat;
|
||||
static int ext4_mballoc_ready;
|
||||
|
||||
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
|
||||
unsigned long journal_devnum);
|
||||
@ -845,6 +846,10 @@ static void ext4_put_super(struct super_block *sb)
|
||||
invalidate_bdev(sbi->journal_bdev);
|
||||
ext4_blkdev_remove(sbi);
|
||||
}
|
||||
if (sbi->s_mb_cache) {
|
||||
ext4_xattr_destroy_cache(sbi->s_mb_cache);
|
||||
sbi->s_mb_cache = NULL;
|
||||
}
|
||||
if (sbi->s_mmp_tsk)
|
||||
kthread_stop(sbi->s_mmp_tsk);
|
||||
sb->s_fs_info = NULL;
|
||||
@ -940,7 +945,7 @@ static void init_once(void *foo)
|
||||
inode_init_once(&ei->vfs_inode);
|
||||
}
|
||||
|
||||
static int init_inodecache(void)
|
||||
static int __init init_inodecache(void)
|
||||
{
|
||||
ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
|
||||
sizeof(struct ext4_inode_info),
|
||||
@ -3575,6 +3580,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
||||
"feature flags set on rev 0 fs, "
|
||||
"running e2fsck is recommended");
|
||||
|
||||
if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
|
||||
set_opt2(sb, HURD_COMPAT);
|
||||
if (EXT4_HAS_INCOMPAT_FEATURE(sb,
|
||||
EXT4_FEATURE_INCOMPAT_64BIT)) {
|
||||
ext4_msg(sb, KERN_ERR,
|
||||
"The Hurd can't support 64-bit file systems");
|
||||
goto failed_mount;
|
||||
}
|
||||
}
|
||||
|
||||
if (IS_EXT2_SB(sb)) {
|
||||
if (ext2_feature_set_ok(sb))
|
||||
ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
|
||||
@ -4010,6 +4025,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
||||
percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
|
||||
|
||||
no_journal:
|
||||
if (ext4_mballoc_ready) {
|
||||
sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
|
||||
if (!sbi->s_mb_cache) {
|
||||
ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
|
||||
goto failed_mount_wq;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the # of file system overhead blocks from the
|
||||
* superblock if present.
|
||||
@ -4835,6 +4858,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||||
}
|
||||
|
||||
if (*flags & MS_RDONLY) {
|
||||
err = sync_filesystem(sb);
|
||||
if (err < 0)
|
||||
goto restore_opts;
|
||||
err = dquot_suspend(sb, -1);
|
||||
if (err < 0)
|
||||
goto restore_opts;
|
||||
@ -5515,12 +5541,10 @@ static int __init ext4_init_fs(void)
|
||||
goto out4;
|
||||
|
||||
err = ext4_init_mballoc();
|
||||
if (err)
|
||||
goto out3;
|
||||
|
||||
err = ext4_init_xattr();
|
||||
if (err)
|
||||
goto out2;
|
||||
else
|
||||
ext4_mballoc_ready = 1;
|
||||
err = init_inodecache();
|
||||
if (err)
|
||||
goto out1;
|
||||
@ -5536,10 +5560,9 @@ static int __init ext4_init_fs(void)
|
||||
unregister_as_ext3();
|
||||
destroy_inodecache();
|
||||
out1:
|
||||
ext4_exit_xattr();
|
||||
out2:
|
||||
ext4_mballoc_ready = 0;
|
||||
ext4_exit_mballoc();
|
||||
out3:
|
||||
out2:
|
||||
ext4_exit_feat_adverts();
|
||||
out4:
|
||||
if (ext4_proc_root)
|
||||
@ -5562,7 +5585,6 @@ static void __exit ext4_exit_fs(void)
|
||||
unregister_as_ext3();
|
||||
unregister_filesystem(&ext4_fs_type);
|
||||
destroy_inodecache();
|
||||
ext4_exit_xattr();
|
||||
ext4_exit_mballoc();
|
||||
ext4_exit_feat_adverts();
|
||||
remove_proc_entry("fs/ext4", NULL);
|
||||
|
@ -81,7 +81,7 @@
|
||||
# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
static void ext4_xattr_cache_insert(struct buffer_head *);
|
||||
static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
|
||||
static struct buffer_head *ext4_xattr_cache_find(struct inode *,
|
||||
struct ext4_xattr_header *,
|
||||
struct mb_cache_entry **);
|
||||
@ -90,8 +90,6 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *,
|
||||
static int ext4_xattr_list(struct dentry *dentry, char *buffer,
|
||||
size_t buffer_size);
|
||||
|
||||
static struct mb_cache *ext4_xattr_cache;
|
||||
|
||||
static const struct xattr_handler *ext4_xattr_handler_map[] = {
|
||||
[EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
|
||||
#ifdef CONFIG_EXT4_FS_POSIX_ACL
|
||||
@ -117,6 +115,9 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
#define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \
|
||||
inode->i_sb->s_fs_info)->s_mb_cache)
|
||||
|
||||
static __le32 ext4_xattr_block_csum(struct inode *inode,
|
||||
sector_t block_nr,
|
||||
struct ext4_xattr_header *hdr)
|
||||
@ -265,6 +266,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
|
||||
struct ext4_xattr_entry *entry;
|
||||
size_t size;
|
||||
int error;
|
||||
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
|
||||
|
||||
ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
|
||||
name_index, name, buffer, (long)buffer_size);
|
||||
@ -286,7 +288,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
|
||||
error = -EIO;
|
||||
goto cleanup;
|
||||
}
|
||||
ext4_xattr_cache_insert(bh);
|
||||
ext4_xattr_cache_insert(ext4_mb_cache, bh);
|
||||
entry = BFIRST(bh);
|
||||
error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
|
||||
if (error == -EIO)
|
||||
@ -409,6 +411,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
||||
struct inode *inode = dentry->d_inode;
|
||||
struct buffer_head *bh = NULL;
|
||||
int error;
|
||||
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
|
||||
|
||||
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
|
||||
buffer, (long)buffer_size);
|
||||
@ -430,7 +433,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
||||
error = -EIO;
|
||||
goto cleanup;
|
||||
}
|
||||
ext4_xattr_cache_insert(bh);
|
||||
ext4_xattr_cache_insert(ext4_mb_cache, bh);
|
||||
error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
|
||||
|
||||
cleanup:
|
||||
@ -526,8 +529,9 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
|
||||
{
|
||||
struct mb_cache_entry *ce = NULL;
|
||||
int error = 0;
|
||||
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
|
||||
|
||||
ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
|
||||
ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
|
||||
error = ext4_journal_get_write_access(handle, bh);
|
||||
if (error)
|
||||
goto out;
|
||||
@ -567,12 +571,13 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
|
||||
size_t *min_offs, void *base, int *total)
|
||||
{
|
||||
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
|
||||
*total += EXT4_XATTR_LEN(last->e_name_len);
|
||||
if (!last->e_value_block && last->e_value_size) {
|
||||
size_t offs = le16_to_cpu(last->e_value_offs);
|
||||
if (offs < *min_offs)
|
||||
*min_offs = offs;
|
||||
}
|
||||
if (total)
|
||||
*total += EXT4_XATTR_LEN(last->e_name_len);
|
||||
}
|
||||
return (*min_offs - ((void *)last - base) - sizeof(__u32));
|
||||
}
|
||||
@ -745,13 +750,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
|
||||
struct ext4_xattr_search *s = &bs->s;
|
||||
struct mb_cache_entry *ce = NULL;
|
||||
int error = 0;
|
||||
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
|
||||
|
||||
#define header(x) ((struct ext4_xattr_header *)(x))
|
||||
|
||||
if (i->value && i->value_len > sb->s_blocksize)
|
||||
return -ENOSPC;
|
||||
if (s->base) {
|
||||
ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
|
||||
ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
|
||||
bs->bh->b_blocknr);
|
||||
error = ext4_journal_get_write_access(handle, bs->bh);
|
||||
if (error)
|
||||
@ -769,7 +775,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
|
||||
if (!IS_LAST_ENTRY(s->first))
|
||||
ext4_xattr_rehash(header(s->base),
|
||||
s->here);
|
||||
ext4_xattr_cache_insert(bs->bh);
|
||||
ext4_xattr_cache_insert(ext4_mb_cache,
|
||||
bs->bh);
|
||||
}
|
||||
unlock_buffer(bs->bh);
|
||||
if (error == -EIO)
|
||||
@ -905,7 +912,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
|
||||
memcpy(new_bh->b_data, s->base, new_bh->b_size);
|
||||
set_buffer_uptodate(new_bh);
|
||||
unlock_buffer(new_bh);
|
||||
ext4_xattr_cache_insert(new_bh);
|
||||
ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
|
||||
error = ext4_handle_dirty_xattr_block(handle,
|
||||
inode, new_bh);
|
||||
if (error)
|
||||
@ -1228,7 +1235,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
|
||||
struct ext4_xattr_block_find *bs = NULL;
|
||||
char *buffer = NULL, *b_entry_name = NULL;
|
||||
size_t min_offs, free;
|
||||
int total_ino, total_blk;
|
||||
int total_ino;
|
||||
void *base, *start, *end;
|
||||
int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
|
||||
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
|
||||
@ -1286,8 +1293,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
|
||||
first = BFIRST(bh);
|
||||
end = bh->b_data + bh->b_size;
|
||||
min_offs = end - base;
|
||||
free = ext4_xattr_free_space(first, &min_offs, base,
|
||||
&total_blk);
|
||||
free = ext4_xattr_free_space(first, &min_offs, base, NULL);
|
||||
if (free < new_extra_isize) {
|
||||
if (!tried_min_extra_isize && s_min_extra_isize) {
|
||||
tried_min_extra_isize++;
|
||||
@ -1495,13 +1501,13 @@ ext4_xattr_put_super(struct super_block *sb)
|
||||
* Returns 0, or a negative error number on failure.
|
||||
*/
|
||||
static void
|
||||
ext4_xattr_cache_insert(struct buffer_head *bh)
|
||||
ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
|
||||
{
|
||||
__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
|
||||
struct mb_cache_entry *ce;
|
||||
int error;
|
||||
|
||||
ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
|
||||
ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
|
||||
if (!ce) {
|
||||
ea_bdebug(bh, "out of memory");
|
||||
return;
|
||||
@ -1573,12 +1579,13 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
|
||||
{
|
||||
__u32 hash = le32_to_cpu(header->h_hash);
|
||||
struct mb_cache_entry *ce;
|
||||
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
|
||||
|
||||
if (!header->h_hash)
|
||||
return NULL; /* never share */
|
||||
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
|
||||
again:
|
||||
ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
|
||||
ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
|
||||
hash);
|
||||
while (ce) {
|
||||
struct buffer_head *bh;
|
||||
@ -1676,19 +1683,17 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
|
||||
|
||||
#undef BLOCK_HASH_SHIFT
|
||||
|
||||
int __init
|
||||
ext4_init_xattr(void)
|
||||
#define HASH_BUCKET_BITS 10
|
||||
|
||||
struct mb_cache *
|
||||
ext4_xattr_create_cache(char *name)
|
||||
{
|
||||
ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
|
||||
if (!ext4_xattr_cache)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
return mb_cache_create(name, HASH_BUCKET_BITS);
|
||||
}
|
||||
|
||||
void
|
||||
ext4_exit_xattr(void)
|
||||
void ext4_xattr_destroy_cache(struct mb_cache *cache)
|
||||
{
|
||||
if (ext4_xattr_cache)
|
||||
mb_cache_destroy(ext4_xattr_cache);
|
||||
ext4_xattr_cache = NULL;
|
||||
if (cache)
|
||||
mb_cache_destroy(cache);
|
||||
}
|
||||
|
||||
|
@ -110,9 +110,6 @@ extern void ext4_xattr_put_super(struct super_block *);
|
||||
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
|
||||
struct ext4_inode *raw_inode, handle_t *handle);
|
||||
|
||||
extern int __init ext4_init_xattr(void);
|
||||
extern void ext4_exit_xattr(void);
|
||||
|
||||
extern const struct xattr_handler *ext4_xattr_handlers[];
|
||||
|
||||
extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
|
||||
@ -124,6 +121,9 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
|
||||
struct ext4_xattr_info *i,
|
||||
struct ext4_xattr_ibody_find *is);
|
||||
|
||||
extern struct mb_cache *ext4_xattr_create_cache(char *name);
|
||||
extern void ext4_xattr_destroy_cache(struct mb_cache *);
|
||||
|
||||
#ifdef CONFIG_EXT4_FS_SECURITY
|
||||
extern int ext4_init_security(handle_t *handle, struct inode *inode,
|
||||
struct inode *dir, const struct qstr *qstr);
|
||||
|
@ -568,6 +568,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
|
||||
struct f2fs_mount_info org_mount_opt;
|
||||
int err, active_logs;
|
||||
|
||||
sync_filesystem(sb);
|
||||
|
||||
/*
|
||||
* Save the old mount options in case we
|
||||
* need to restore them.
|
||||
|
@ -635,6 +635,8 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
|
||||
struct msdos_sb_info *sbi = MSDOS_SB(sb);
|
||||
*flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
|
||||
|
||||
sync_filesystem(sb);
|
||||
|
||||
/* make sure we update state on remount. */
|
||||
new_rdonly = *flags & MS_RDONLY;
|
||||
if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
|
||||
|
@ -124,6 +124,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
|
||||
|
||||
static int vxfs_remount(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
sync_filesystem(sb);
|
||||
*flags |= MS_RDONLY;
|
||||
return 0;
|
||||
}
|
||||
|
@ -135,6 +135,7 @@ static void fuse_evict_inode(struct inode *inode)
|
||||
|
||||
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
sync_filesystem(sb);
|
||||
if (*flags & MS_MANDLOCK)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1167,6 +1167,8 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
|
||||
struct gfs2_tune *gt = &sdp->sd_tune;
|
||||
int error;
|
||||
|
||||
sync_filesystem(sb);
|
||||
|
||||
spin_lock(>->gt_spin);
|
||||
args.ar_commit = gt->gt_logd_secs;
|
||||
args.ar_quota_quantum = gt->gt_quota_quantum;
|
||||
|
@ -112,6 +112,7 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
||||
|
||||
static int hfs_remount(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
sync_filesystem(sb);
|
||||
*flags |= MS_NODIRATIME;
|
||||
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
|
||||
return 0;
|
||||
|
@ -323,6 +323,7 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
|
||||
|
||||
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
sync_filesystem(sb);
|
||||
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
|
||||
return 0;
|
||||
if (!(*flags & MS_RDONLY)) {
|
||||
|
@ -421,6 +421,8 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
|
||||
struct hpfs_sb_info *sbi = hpfs_sb(s);
|
||||
char *new_opts = kstrdup(data, GFP_KERNEL);
|
||||
|
||||
sync_filesystem(s);
|
||||
|
||||
*flags |= MS_NOATIME;
|
||||
|
||||
hpfs_lock(s);
|
||||
|
fs/inode.c (31 changed lines)
@ -1898,3 +1898,34 @@ void inode_dio_done(struct inode *inode)
|
||||
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
|
||||
}
|
||||
EXPORT_SYMBOL(inode_dio_done);
|
||||
|
||||
/*
|
||||
* inode_set_flags - atomically set some inode flags
|
||||
*
|
||||
* Note: the caller should be holding i_mutex, or else be sure that
|
||||
* they have exclusive access to the inode structure (i.e., while the
|
||||
* inode is being instantiated). The reason for the cmpxchg() loop
|
||||
* --- which wouldn't be necessary if all code paths which modify
|
||||
* i_flags actually followed this rule, is that there is at least one
|
||||
* code path which doesn't today --- for example,
|
||||
* __generic_file_aio_write() calls file_remove_suid() without holding
|
||||
* i_mutex --- so we use cmpxchg() out of an abundance of caution.
|
||||
*
|
||||
* In the long run, i_mutex is overkill, and we should probably look
|
||||
* at using the i_lock spinlock to protect i_flags, and then make sure
|
||||
* it is so documented in include/linux/fs.h and that all code follows
|
||||
* the locking convention!!
|
||||
*/
|
||||
void inode_set_flags(struct inode *inode, unsigned int flags,
|
||||
unsigned int mask)
|
||||
{
|
||||
unsigned int old_flags, new_flags;
|
||||
|
||||
WARN_ON_ONCE(flags & ~mask);
|
||||
do {
|
||||
old_flags = ACCESS_ONCE(inode->i_flags);
|
||||
new_flags = (old_flags & ~mask) | flags;
|
||||
} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
|
||||
new_flags) != old_flags));
|
||||
}
|
||||
EXPORT_SYMBOL(inode_set_flags);
|
||||
|
@ -117,6 +117,7 @@ static void destroy_inodecache(void)
|
||||
|
||||
static int isofs_remount(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
sync_filesystem(sb);
|
||||
if (!(*flags & MS_RDONLY))
|
||||
return -EROFS;
|
||||
return 0;
|
||||
|
@ -555,7 +555,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
blk_start_plug(&plug);
|
||||
jbd2_journal_write_revoke_records(journal, commit_transaction,
|
||||
&log_bufs, WRITE_SYNC);
|
||||
blk_finish_plug(&plug);
|
||||
|
||||
jbd_debug(3, "JBD2: commit phase 2b\n");
|
||||
|
||||
@ -582,7 +581,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
err = 0;
|
||||
bufs = 0;
|
||||
descriptor = NULL;
|
||||
blk_start_plug(&plug);
|
||||
while (commit_transaction->t_buffers) {
|
||||
|
||||
/* Find the next buffer to be journaled... */
|
||||
@ -1067,6 +1065,25 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
goto restart_loop;
|
||||
}
|
||||
|
||||
/* Add the transaction to the checkpoint list
|
||||
* __journal_remove_checkpoint() can not destroy transaction
|
||||
* under us because it is not marked as T_FINISHED yet */
|
||||
if (journal->j_checkpoint_transactions == NULL) {
|
||||
journal->j_checkpoint_transactions = commit_transaction;
|
||||
commit_transaction->t_cpnext = commit_transaction;
|
||||
commit_transaction->t_cpprev = commit_transaction;
|
||||
} else {
|
||||
commit_transaction->t_cpnext =
|
||||
journal->j_checkpoint_transactions;
|
||||
commit_transaction->t_cpprev =
|
||||
commit_transaction->t_cpnext->t_cpprev;
|
||||
commit_transaction->t_cpnext->t_cpprev =
|
||||
commit_transaction;
|
||||
commit_transaction->t_cpprev->t_cpnext =
|
||||
commit_transaction;
|
||||
}
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
|
||||
/* Done with this transaction! */
|
||||
|
||||
jbd_debug(3, "JBD2: commit phase 7\n");
|
||||
@ -1085,24 +1102,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
atomic_read(&commit_transaction->t_handle_count);
|
||||
trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
|
||||
commit_transaction->t_tid, &stats.run);
|
||||
|
||||
/*
|
||||
* Calculate overall stats
|
||||
*/
|
||||
spin_lock(&journal->j_history_lock);
|
||||
journal->j_stats.ts_tid++;
|
||||
if (commit_transaction->t_requested)
|
||||
journal->j_stats.ts_requested++;
|
||||
journal->j_stats.run.rs_wait += stats.run.rs_wait;
|
||||
journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
|
||||
journal->j_stats.run.rs_running += stats.run.rs_running;
|
||||
journal->j_stats.run.rs_locked += stats.run.rs_locked;
|
||||
journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
|
||||
journal->j_stats.run.rs_logging += stats.run.rs_logging;
|
||||
journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
|
||||
journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
|
||||
journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
|
||||
spin_unlock(&journal->j_history_lock);
|
||||
stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
|
||||
|
||||
commit_transaction->t_state = T_COMMIT_CALLBACK;
|
||||
J_ASSERT(commit_transaction == journal->j_committing_transaction);
|
||||
@ -1122,24 +1122,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
|
||||
write_unlock(&journal->j_state_lock);
|
||||
|
||||
if (journal->j_checkpoint_transactions == NULL) {
|
||||
journal->j_checkpoint_transactions = commit_transaction;
|
||||
commit_transaction->t_cpnext = commit_transaction;
|
||||
commit_transaction->t_cpprev = commit_transaction;
|
||||
} else {
|
||||
commit_transaction->t_cpnext =
|
||||
journal->j_checkpoint_transactions;
|
||||
commit_transaction->t_cpprev =
|
||||
commit_transaction->t_cpnext->t_cpprev;
|
||||
commit_transaction->t_cpnext->t_cpprev =
|
||||
commit_transaction;
|
||||
commit_transaction->t_cpprev->t_cpnext =
|
||||
commit_transaction;
|
||||
}
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
/* Drop all spin_locks because commit_callback may be block.
|
||||
* __journal_remove_checkpoint() can not destroy transaction
|
||||
* under us because it is not marked as T_FINISHED yet */
|
||||
if (journal->j_commit_callback)
|
||||
journal->j_commit_callback(journal, commit_transaction);
|
||||
|
||||
@ -1150,7 +1132,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
write_lock(&journal->j_state_lock);
|
||||
spin_lock(&journal->j_list_lock);
|
||||
commit_transaction->t_state = T_FINISHED;
|
||||
/* Recheck checkpoint lists after j_list_lock was dropped */
|
||||
/* Check if the transaction can be dropped now that we are finished */
|
||||
if (commit_transaction->t_checkpoint_list == NULL &&
|
||||
commit_transaction->t_checkpoint_io_list == NULL) {
|
||||
__jbd2_journal_drop_transaction(journal, commit_transaction);
|
||||
@ -1159,4 +1141,21 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
write_unlock(&journal->j_state_lock);
|
||||
wake_up(&journal->j_wait_done_commit);
|
||||
|
||||
/*
|
||||
* Calculate overall stats
|
||||
*/
|
||||
spin_lock(&journal->j_history_lock);
|
||||
journal->j_stats.ts_tid++;
|
||||
journal->j_stats.ts_requested += stats.ts_requested;
|
||||
journal->j_stats.run.rs_wait += stats.run.rs_wait;
|
||||
journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
|
||||
journal->j_stats.run.rs_running += stats.run.rs_running;
|
||||
journal->j_stats.run.rs_locked += stats.run.rs_locked;
|
||||
journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
|
||||
journal->j_stats.run.rs_logging += stats.run.rs_logging;
|
||||
journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
|
||||
journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
|
||||
journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
|
||||
spin_unlock(&journal->j_history_lock);
|
||||
}
|
||||
|
@ -122,7 +122,7 @@ EXPORT_SYMBOL(__jbd2_debug);
|
||||
#endif
|
||||
|
||||
/* Checksumming functions */
|
||||
int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
|
||||
static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
|
||||
{
|
||||
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
||||
return 1;
|
||||
@ -143,7 +143,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
|
||||
return cpu_to_be32(csum);
|
||||
}
|
||||
|
||||
int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
|
||||
static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
|
||||
{
|
||||
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
||||
return 1;
|
||||
@ -151,7 +151,7 @@ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
|
||||
return sb->s_checksum == jbd2_superblock_csum(j, sb);
|
||||
}
|
||||
|
||||
void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
|
||||
static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
|
||||
{
|
||||
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
||||
return;
|
||||
@ -302,8 +302,8 @@ static void journal_kill_thread(journal_t *journal)
|
||||
journal->j_flags |= JBD2_UNMOUNT;
|
||||
|
||||
while (journal->j_task) {
|
||||
wake_up(&journal->j_wait_commit);
|
||||
write_unlock(&journal->j_state_lock);
|
||||
wake_up(&journal->j_wait_commit);
|
||||
wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
|
||||
write_lock(&journal->j_state_lock);
|
||||
}
|
||||
@ -710,8 +710,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
|
||||
while (tid_gt(tid, journal->j_commit_sequence)) {
|
||||
jbd_debug(1, "JBD2: want %d, j_commit_sequence=%d\n",
|
||||
tid, journal->j_commit_sequence);
|
||||
wake_up(&journal->j_wait_commit);
|
||||
read_unlock(&journal->j_state_lock);
|
||||
wake_up(&journal->j_wait_commit);
|
||||
wait_event(journal->j_wait_done_commit,
|
||||
!tid_gt(tid, journal->j_commit_sequence));
|
||||
read_lock(&journal->j_state_lock);
@@ -1073,7 +1073,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
@@ -1096,12 +1095,14 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
@@ -1312,7 +1313,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
			journal->j_running_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)",
			       "journal->j_running_transaction (%p, %u)\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
@@ -1335,30 +1336,25 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(jh->b_transaction !=
			     journal->j_committing_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_committing_transaction (%p, %u)",
		if (unlikely(((jh->b_transaction !=
			       journal->j_committing_transaction)) ||
			     (jh->b_next_transaction != transaction))) {
			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
			       "bad jh for block %llu: "
			       "transaction (%p, %u), "
			       "jh->b_transaction (%p, %u), "
			       "jh->b_next_transaction (%p, %u), jlist %u\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       transaction, transaction->t_tid,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_committing_transaction,
			       journal->j_committing_transaction ?
			       journal->j_committing_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		if (unlikely(jh->b_next_transaction != transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_next_transaction (%llu, %p, %u) != "
			       "transaction (%p, %u)",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction ?
			       jh->b_transaction->t_tid : 0,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       transaction, transaction->t_tid);
			       jh->b_jlist);
			WARN_ON(1);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
@@ -1415,7 +1411,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
@@ -1468,6 +1463,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
		 * we know to remove the checkpoint after we commit.
		 */
		spin_lock(&journal->j_list_lock);
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
@@ -1480,6 +1476,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
				goto drop;
			}
		}
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
@@ -1491,7 +1488,9 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			spin_lock(&journal->j_list_lock);
			jh->b_next_transaction = NULL;
			spin_unlock(&journal->j_list_lock);

			/*
			 * only drop a reference if this transaction modified
@@ -1503,7 +1502,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
@@ -1821,11 +1819,11 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL)
	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
	if (jh->b_cp_transaction != NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
@@ -243,6 +243,7 @@ static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data)
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
	int err;

	sync_filesystem(sb);
	err = jffs2_parse_options(c, data);
	if (err)
		return -EINVAL;

@@ -418,6 +418,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}
540	fs/mbcache.c
@@ -26,6 +26,41 @@
 * back on the lru list.
 */

/*
 * Lock descriptions and usage:
 *
 * Each hash chain of both the block and index hash tables now contains
 * a built-in lock used to serialize accesses to the hash chain.
 *
 * Accesses to global data structures mb_cache_list and mb_cache_lru_list
 * are serialized via the global spinlock mb_cache_spinlock.
 *
 * Each mb_cache_entry contains a spinlock, e_entry_lock, to serialize
 * accesses to its local data, such as e_used and e_queued.
 *
 * Lock ordering:
 *
 * Each block hash chain's lock has the highest lock order, followed by an
 * index hash chain's lock, mb_cache_bg_lock (used to implement mb_cache_entry's
 * lock), and mb_cache_spinlock, with the lowest order.  While holding
 * either a block or index hash chain lock, a thread can acquire
 * mb_cache_bg_lock, which in turn can also acquire mb_cache_spinlock.
 *
 * Synchronization:
 *
 * Since both mb_cache_entry_get and mb_cache_entry_find scan the block and
 * index hash chains, each needs to lock the corresponding hash chain.  For each
 * mb_cache_entry within the chain, it needs to lock the mb_cache_entry to
 * prevent either any simultaneous release or free on the entry and also
 * to serialize accesses to either the e_used or e_queued member of the entry.
 *
 * To avoid having a dangling reference to an already freed
 * mb_cache_entry, an mb_cache_entry is only freed when it is not on a
 * block hash chain and also no longer being referenced: both e_used
 * and e_queued are 0.  When an mb_cache_entry is explicitly freed, it is
 * first removed from a block hash chain.
 */
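The comment above fixes the lock order as: block hash chain lock, then index hash chain lock, then the per-entry lock backed by mb_cache_bg_lock, then mb_cache_spinlock. A minimal illustrative sketch of a helper that respects that order follows; the function name is hypothetical and is not part of the patch, only the lock primitives it calls are.

/*
 * Illustrative only: take the locks guarding one cache entry in the
 * documented order (block chain -> index chain -> entry -> global LRU
 * lock) and release them in reverse.  Assumes "ce" is pinned by the caller.
 */
static void example_lock_entry_for_removal(struct mb_cache_entry *ce)
{
	hlist_bl_lock(ce->e_block_hash_p);	/* highest lock order */
	hlist_bl_lock(ce->e_index_hash_p);
	__spin_lock_mb_cache_entry(ce);		/* backed by mb_cache_bg_lock */
	spin_lock(&mb_cache_spinlock);		/* lowest order: LRU list */

	/* ... manipulate e_used/e_queued and the LRU list here ... */

	spin_unlock(&mb_cache_spinlock);
	__spin_unlock_mb_cache_entry(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_unlock(ce->e_block_hash_p);
}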
#include <linux/kernel.h>
#include <linux/module.h>

@@ -34,9 +69,10 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list_bl.h>
#include <linux/mbcache.h>

#include <linux/init.h>
#include <linux/blockgroup_lock.h>

#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
@@ -57,8 +93,14 @@

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

#define MB_CACHE_ENTRY_LOCK_BITS	__builtin_log2(NR_BG_LOCKS)
#define MB_CACHE_ENTRY_LOCK_INDEX(ce)	\
	(hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

static struct blockgroup_lock *mb_cache_bg_lock;
static struct kmem_cache *mb_cache_kmem_cache;

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");
@@ -86,58 +128,110 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline void
__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
			       MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline void
__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_unlock(bgl_lock_ptr(mb_cache_bg_lock,
				 MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
	return !hlist_bl_unhashed(&ce->e_block_list);
}

static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
static inline void
__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
	if (__mb_cache_entry_is_block_hashed(ce))
		hlist_bl_del_init(&ce->e_block_list);
}

static inline int
__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_index.o_list);
}

static inline void
__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_index_hashed(ce))
		hlist_bl_del_init(&ce->e_index.o_list);
}

/*
 * __mb_cache_entry_unhash_unlock()
 *
 * This function is called to unhash both the block and index hash
 * chain.
 * It assumes both the block and index hash chains are locked upon entry.
 * It also unlocks both hash chains upon exit.
 */
static inline void
__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
{
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}

static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
__mb_cache_entry_release(struct mb_cache_entry *ce)
{
	/* First lock the entry to serialize access to its local data. */
	__spin_lock_mb_cache_entry(ce);
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	/*
	 * Make sure that all cache entries on lru_list have
	 * both e_used and e_queued of 0s.
	 */
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
	if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			__spin_unlock_mb_cache_entry(ce);
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
		}
		/*
		 * Need access to lru list, first drop entry lock,
		 * then reacquire the lock in the proper order.
		 */
		spin_lock(&mb_cache_spinlock);
		if (list_empty(&ce->e_lru_list))
			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);
	__spin_unlock_mb_cache_entry(ce);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
@@ -160,17 +254,34 @@ mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
	while ((nr_to_scan-- > 0) && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
		freed++;
				   struct mb_cache_entry, e_lru_list);
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			continue;
		spin_unlock(&mb_cache_spinlock);
		/* Prevent any find or get operation on the entry */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Ignore if it is touched by a find/get */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			spin_lock(&mb_cache_spinlock);
			continue;
		}
		__mb_cache_entry_unhash_unlock(ce);
		list_add_tail(&ce->e_lru_list, &free_list);
		spin_lock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
		freed++;
	}
	return freed;
}
@@ -215,29 +326,40 @@ mb_cache_create(const char *name, int bucket_bits)
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (!mb_cache_bg_lock) {
		mb_cache_bg_lock = kmalloc(sizeof(struct blockgroup_lock),
					   GFP_KERNEL);
		if (!mb_cache_bg_lock)
			return NULL;
		bgl_lock_init(mb_cache_bg_lock);
	}

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	cache->c_block_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;
		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
	if (!mb_cache_kmem_cache) {
		mb_cache_kmem_cache = kmem_cache_create(name,
			sizeof(struct mb_cache_entry), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
		if (!mb_cache_kmem_cache)
			goto fail2;
	}
	cache->c_entry_cache = mb_cache_kmem_cache;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
@@ -273,21 +395,47 @@ void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	struct list_head *l;
	struct mb_cache_entry *ce, *tmp;

	l = &mb_cache_lru_list;
	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
	while (!list_is_last(l, &mb_cache_lru_list)) {
		l = l->next;
		ce = list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
			list_del_init(&ce->e_lru_list);
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt))
				continue;
			spin_unlock(&mb_cache_spinlock);
			/*
			 * Prevent any find or get operation on the entry.
			 */
			hlist_bl_lock(ce->e_block_hash_p);
			hlist_bl_lock(ce->e_index_hash_p);
			/* Ignore if it is touched by a find/get */
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt) ||
			    !list_empty(&ce->e_lru_list)) {
				hlist_bl_unlock(ce->e_index_hash_p);
				hlist_bl_unlock(ce->e_block_hash_p);
				l = &mb_cache_lru_list;
				spin_lock(&mb_cache_spinlock);
				continue;
			}
			__mb_cache_entry_unhash_unlock(ce);
			mb_assert(!(ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt)));
			list_add_tail(&ce->e_lru_list, &free_list);
			l = &mb_cache_lru_list;
			spin_lock(&mb_cache_spinlock);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}
}
@@ -303,23 +451,27 @@ void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
	list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache == cache)
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		list_del_init(&ce->e_lru_list);
		/*
		 * Prevent any find or get operation on the entry.
		 */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
@@ -328,8 +480,10 @@ mb_cache_destroy(struct mb_cache *cache)
			  atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	if (list_empty(&mb_cache_list)) {
		kmem_cache_destroy(mb_cache_kmem_cache);
		mb_cache_kmem_cache = NULL;
	}
	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
@@ -346,28 +500,61 @@ mb_cache_destroy(struct mb_cache *cache)
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;
	struct mb_cache_entry *ce;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		struct list_head *l;

		l = &mb_cache_lru_list;
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		while (!list_is_last(l, &mb_cache_lru_list)) {
			l = l->next;
			ce = list_entry(l, struct mb_cache_entry, e_lru_list);
			if (ce->e_cache == cache) {
				list_del_init(&ce->e_lru_list);
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt))
					continue;
				spin_unlock(&mb_cache_spinlock);
				/*
				 * Prevent any find or get operation on the
				 * entry.
				 */
				hlist_bl_lock(ce->e_block_hash_p);
				hlist_bl_lock(ce->e_index_hash_p);
				/* Ignore if it is touched by a find/get */
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt) ||
				    !list_empty(&ce->e_lru_list)) {
					hlist_bl_unlock(ce->e_index_hash_p);
					hlist_bl_unlock(ce->e_block_hash_p);
					l = &mb_cache_lru_list;
					spin_lock(&mb_cache_spinlock);
					continue;
				}
				mb_assert(list_empty(&ce->e_lru_list));
				mb_assert(!(ce->e_used || ce->e_queued ||
					    atomic_read(&ce->e_refcnt)));
				__mb_cache_entry_unhash_unlock(ce);
				goto found;
			}
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (!ce)
		return NULL;
	atomic_inc(&cache->c_entry_count);
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_HLIST_BL_NODE(&ce->e_block_list);
	INIT_HLIST_BL_NODE(&ce->e_index.o_list);
	ce->e_cache = cache;
	ce->e_queued = 0;
	atomic_set(&ce->e_refcnt, 0);
found:
	ce->e_block_hash_p = &cache->c_block_hash[0];
	ce->e_index_hash_p = &cache->c_index_hash[0];
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}
@@ -393,29 +580,38 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;
	struct hlist_bl_node *l;
	struct hlist_bl_head *block_hash_p;
	struct hlist_bl_head *index_hash_p;
	struct mb_cache_entry *lce;

	mb_assert(ce);
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	block_hash_p = &cache->c_block_hash[bucket];
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
		if (lce->e_bdev == bdev && lce->e_block == block) {
			hlist_bl_unlock(block_hash_p);
			return -EBUSY;
		}
	}
	__mb_cache_entry_unhash(ce);
	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
	__mb_cache_entry_unhash_block(ce);
	__mb_cache_entry_unhash_index(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_block_hash_p = block_hash_p;
	ce->e_index.o_key = key;
	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
	hlist_bl_unlock(block_hash_p);
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	ce->e_index_hash_p = index_hash_p;
	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
	hlist_bl_unlock(index_hash_p);
	return 0;
}
@@ -429,24 +625,26 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
	__mb_cache_entry_release(ce);
}

/*
 * mb_cache_entry_free()
 *
 * This is equivalent to the sequence mb_cache_entry_takeout() --
 * mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(ce);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
	hlist_bl_lock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_lock(ce->e_block_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
	__mb_cache_entry_release(ce);
}
@@ -463,84 +661,110 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *block_hash_p;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
	block_hash_p = &cache->c_block_hash[bucket];
	/* First serialize access to the block corresponding hash chain. */
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
		mb_assert(ce->e_block_hash_p == block_hash_p);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(block_hash_p);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			if (ce->e_used > 0) {
				DEFINE_WAIT(wait);
				while (ce->e_used > 0) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;
			__spin_unlock_mb_cache_entry(ce);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return NULL;
			}
			goto cleanup;
			return ce;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
	hlist_bl_unlock(block_hash_p);
	return NULL;
}
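The get path above pins the entry with e_refcnt before dropping the hash chain lock, so a concurrent free cannot release the entry while this thread switches over to the per-entry lock. A stripped-down sketch of that hand-off follows; the function name is illustrative and the writer wait loop and LRU handling are omitted.

/*
 * Illustrative sketch of the hand-off used by mb_cache_entry_get(): pin the
 * entry while trading the hash chain lock for the entry's own lock, so a
 * concurrent __mb_cache_entry_forget() cannot free it in between.
 */
static struct mb_cache_entry *
example_pin_and_lock(struct mb_cache_entry *ce, struct hlist_bl_head *chain)
{
	atomic_inc(&ce->e_refcnt);	/* prevent a free from removing the entry */
	hlist_bl_unlock(chain);		/* drop the chain lock ... */
	__spin_lock_mb_cache_entry(ce);	/* ... then take the entry's own lock */
	atomic_dec(&ce->e_refcnt);	/* the entry lock keeps it alive now */
	return ce;
}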
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {

	/* The index hash chain is already acquired by the caller. */
	while (l != NULL) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
			hlist_bl_entry(l, struct mb_cache_entry,
				       e_index.o_list);
		mb_assert(ce->e_index_hash_p == head);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(head);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			ce->e_used++;
			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			if (ce->e_used >= MB_CACHE_WRITER) {
				DEFINE_WAIT(wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				while (ce->e_used >= MB_CACHE_WRITER) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			__spin_unlock_mb_cache_entry(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	hlist_bl_unlock(head);
	return NULL;
}
@@ -562,13 +786,17 @@ mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce = NULL;
	struct hlist_bl_head *index_hash_p;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	if (!hlist_bl_empty(index_hash_p)) {
		l = hlist_bl_first(index_hash_p);
		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	} else
		hlist_bl_unlock(index_hash_p);
	return ce;
}
@@ -597,13 +825,17 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev,
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *index_hash_p;

	spin_lock(&mb_cache_spinlock);
	index_hash_p = &cache->c_index_hash[bucket];
	mb_assert(prev->e_index_hash_p == index_hash_p);
	hlist_bl_lock(index_hash_p);
	mb_assert(!hlist_bl_empty(index_hash_p));
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	__mb_cache_entry_release(prev);
	return ce;
}
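The find_first/find_next pair above is how a caller walks every cache entry that shares an index key (ext4's xattr-sharing code is the in-tree user). A hedged sketch of that iteration pattern, with the caller-specific check reduced to a hypothetical predicate:

/*
 * Illustrative walk over all entries with the same index key.  Each hit is
 * returned held; mb_cache_entry_find_next() releases the previous entry,
 * and whoever keeps the final match must call mb_cache_entry_release().
 */
static struct mb_cache_entry *
example_find_match(struct mb_cache *cache, struct block_device *bdev,
		   unsigned int key)
{
	struct mb_cache_entry *ce = mb_cache_entry_find_first(cache, bdev, key);

	while (!IS_ERR_OR_NULL(ce)) {
		if (example_entry_matches(ce))	/* hypothetical predicate */
			return ce;		/* still held by us */
		ce = mb_cache_entry_find_next(ce, bdev, key);
	}
	return NULL;
}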
@@ -123,6 +123,7 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
	struct minix_sb_info * sbi = minix_sb(sb);
	struct minix_super_block * ms;

	sync_filesystem(sb);
	ms = sbi->s_ms;
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;
@@ -99,6 +99,7 @@ static void destroy_inodecache(void)

static int ncp_remount(struct super_block *sb, int *flags, char* data)
{
	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;
	return 0;
}
@@ -2215,6 +2215,8 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
	struct nfs4_mount_data *options4 = (struct nfs4_mount_data *)raw_data;
	u32 nfsvers = nfss->nfs_client->rpc_ops->version;

	sync_filesystem(sb);

	/*
	 * Userspace mount programs that send binary options generally send
	 * them populated with default values. We have no way to know which
@@ -1129,6 +1129,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
	unsigned long old_mount_opt;
	int err;

	sync_filesystem(sb);
	old_sb_flags = sb->s_flags;
	old_mount_opt = nilfs->ns_mount_opt;

@@ -468,6 +468,8 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)

	ntfs_debug("Entering with remount options string: %s", opt);

	sync_filesystem(sb);

#ifndef NTFS_RW
	/* For read-only compiled driver, enforce read-only flag. */
	*flags |= MS_RDONLY;
@@ -634,6 +634,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
	struct ocfs2_super *osb = OCFS2_SB(sb);
	u32 tmp;

	sync_filesystem(sb);

	if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
	    !ocfs2_check_set_options(sb, &parsed_options)) {
		ret = -EINVAL;
@@ -368,6 +368,7 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)

static int openprom_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_NOATIME;
	return 0;
}
@@ -92,6 +92,8 @@ static int proc_parse_options(char *options, struct pid_namespace *pid)
int proc_remount(struct super_block *sb, int *flags, char *data)
{
	struct pid_namespace *pid = sb->s_fs_info;

	sync_filesystem(sb);
	return !proc_parse_options(data, pid);
}

@@ -249,6 +249,7 @@ static void parse_options(char *options)

static int pstore_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	parse_options(data);

	return 0;
@@ -44,6 +44,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
{
	struct qnx4_sb_info *qs;

	sync_filesystem(sb);
	qs = qnx4_sb(sb);
	qs->Version = QNX4_VERSION;
	*flags |= MS_RDONLY;
@@ -55,6 +55,7 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)

static int qnx6_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_RDONLY;
	return 0;
}
@@ -1318,6 +1318,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
	int i;
#endif

	sync_filesystem(s);
	reiserfs_write_lock(s);

#ifdef CONFIG_QUOTA
@@ -432,6 +432,7 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 */
static int romfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_RDONLY;
	return 0;
}
@@ -371,6 +371,7 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)

static int squashfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_RDONLY;
	return 0;
}
@@ -719,8 +719,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
		}
	}

	sync_filesystem(sb);

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
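With sync_filesystem() removed from do_remount_sb(), the contract becomes: every ->remount_fs() implementation that cares about dirty data flushes it itself, first thing, before parsing any options (that is what each per-filesystem hunk above and below adds). A minimal sketch of the resulting pattern for a hypothetical filesystem; the foofs names are illustrative, only sync_filesystem() is real:

/* Hypothetical example of the pattern applied throughout this series. */
static int foofs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);		/* flush before any new option takes effect */

	if (!foofs_parse_options(sb, data))	/* illustrative helper */
		return -EINVAL;
	return 0;
}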
@@ -60,6 +60,7 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	sync_filesystem(sb);
	if (sbi->s_forced_ro)
		*flags |= MS_RDONLY;
	return 0;
@@ -1827,6 +1827,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
	int err;
	struct ubifs_info *c = sb->s_fs_info;

	sync_filesystem(sb);
	dbg_gen("old flags %#lx, new flags %#x", sb->s_flags, *flags);

	err = ubifs_parse_options(c, data, 1);
@@ -646,6 +646,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
	int error = 0;
	struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);

	sync_filesystem(sb);
	if (lvidiu) {
		int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
		if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
@@ -1280,6 +1280,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
	unsigned new_mount_opt, ufstype;
	unsigned flags;

	sync_filesystem(sb);
	lock_ufs(sb);
	mutex_lock(&UFS_SB(sb)->s_lock);
	uspi = UFS_SB(sb)->s_uspi;
@@ -1197,6 +1197,7 @@ xfs_fs_remount(
	char *p;
	int error;

	sync_filesystem(sb);
	while ((p = strsep(&options, ",")) != NULL) {
		int token;
@@ -2572,6 +2572,9 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
void inode_dio_wait(struct inode *inode);
void inode_dio_done(struct inode *inode);

extern void inode_set_flags(struct inode *inode, unsigned int flags,
			    unsigned int mask);

extern const struct file_operations generic_ro_fops;

#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
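inode_set_flags() is declared here so that ext4_set_inode_flags() can update the S_* bits in inode->i_flags without a plain read-modify-write racing against concurrent flag updates. A plausible sketch of such a helper, built on a cmpxchg retry loop; this illustrates the idea and is not necessarily the exact upstream body:

/*
 * Sketch: atomically replace the bits selected by "mask" in i_flags with
 * "flags", retrying if another updater changed i_flags in the meantime.
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}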
@@ -3,19 +3,21 @@

  (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/

struct mb_cache_entry {
	struct list_head		e_lru_list;
	struct mb_cache			*e_cache;
	unsigned short			e_used;
	unsigned short			e_queued;
	atomic_t			e_refcnt;
	struct block_device		*e_bdev;
	sector_t			e_block;
	struct list_head		e_block_list;
	struct hlist_bl_node		e_block_list;
	struct {
		struct list_head	o_list;
		struct hlist_bl_node	o_list;
		unsigned int		o_key;
	} e_index;
	struct hlist_bl_head		*e_block_hash_p;
	struct hlist_bl_head		*e_index_hash_p;
};

struct mb_cache {
@@ -25,8 +27,8 @@ struct mb_cache {
	int				c_max_entries;
	int				c_bucket_bits;
	struct kmem_cache		*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_index_hash;
	struct hlist_bl_head		*c_block_hash;
	struct hlist_bl_head		*c_index_hash;
};

/* Functions on caches */
@@ -16,6 +16,15 @@ struct mpage_da_data;
struct ext4_map_blocks;
struct extent_status;

/* shim until we merge in the xfs_collapse_range branch */
#ifndef FALLOC_FL_COLLAPSE_RANGE
#define FALLOC_FL_COLLAPSE_RANGE	0x08
#endif

#ifndef FALLOC_FL_ZERO_RANGE
#define FALLOC_FL_ZERO_RANGE		0x10
#endif

#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
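The two fallback defines above only let the tracepoints compile before the uapi headers gain the new flags; from userspace the same operations are reached through fallocate(2). A small illustrative program (file name, offsets and sizes are arbitrary; offsets must be block-aligned for COLLAPSE_RANGE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* Zero 1 MiB at offset 4 MiB without changing i_size. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20))
		perror("FALLOC_FL_ZERO_RANGE");
	/* Remove 1 MiB at offset 4 MiB and shift the tail of the file down. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4 << 20, 1 << 20))
		perror("FALLOC_FL_COLLAPSE_RANGE");
	close(fd);
	return 0;
}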
#define show_mballoc_flags(flags) __print_flags(flags, "|",	\
@@ -68,6 +77,13 @@ struct extent_status;
	{ EXTENT_STATUS_DELAYED,	"D" },			\
	{ EXTENT_STATUS_HOLE,		"H" })

#define show_falloc_mode(mode) __print_flags(mode, "|",		\
	{ FALLOC_FL_KEEP_SIZE,		"KEEP_SIZE"},		\
	{ FALLOC_FL_PUNCH_HOLE,		"PUNCH_HOLE"},		\
	{ FALLOC_FL_NO_HIDE_STALE,	"NO_HIDE_STALE"},	\
	{ FALLOC_FL_COLLAPSE_RANGE,	"COLLAPSE_RANGE"},	\
	{ FALLOC_FL_ZERO_RANGE,		"ZERO_RANGE"})

TRACE_EVENT(ext4_free_inode,
	TP_PROTO(struct inode *inode),
@@ -1328,7 +1344,7 @@ TRACE_EVENT(ext4_direct_IO_exit,
		  __entry->rw, __entry->ret)
);

TRACE_EVENT(ext4_fallocate_enter,
DECLARE_EVENT_CLASS(ext4__fallocate_mode,
	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),

	TP_ARGS(inode, offset, len, mode),
@@ -1336,23 +1352,45 @@ TRACE_EVENT(ext4_fallocate_enter,
	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	ino_t,	ino	)
		__field(	loff_t,	pos	)
		__field(	loff_t,	len	)
		__field(	loff_t,	offset	)
		__field(	loff_t,	len	)
		__field(	int,	mode	)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->pos	= offset;
		__entry->offset	= offset;
		__entry->len	= len;
		__entry->mode	= mode;
	),

	TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
	TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino, __entry->pos,
		  __entry->len, __entry->mode)
		  (unsigned long) __entry->ino,
		  __entry->offset, __entry->len,
		  show_falloc_mode(__entry->mode))
);

DEFINE_EVENT(ext4__fallocate_mode, ext4_fallocate_enter,

	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),

	TP_ARGS(inode, offset, len, mode)
);

DEFINE_EVENT(ext4__fallocate_mode, ext4_punch_hole,

	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),

	TP_ARGS(inode, offset, len, mode)
);

DEFINE_EVENT(ext4__fallocate_mode, ext4_zero_range,

	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),

	TP_ARGS(inode, offset, len, mode)
);

TRACE_EVENT(ext4_fallocate_exit,
@@ -1384,31 +1422,6 @@ TRACE_EVENT(ext4_fallocate_exit,
		  __entry->ret)
);

TRACE_EVENT(ext4_punch_hole,
	TP_PROTO(struct inode *inode, loff_t offset, loff_t len),

	TP_ARGS(inode, offset, len),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	ino_t,	ino	)
		__field(	loff_t,	offset	)
		__field(	loff_t,	len	)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->offset	= offset;
		__entry->len	= len;
	),

	TP_printk("dev %d,%d ino %lu offset %lld len %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  __entry->offset, __entry->len)
);

TRACE_EVENT(ext4_unlink_enter,
	TP_PROTO(struct inode *parent, struct dentry *dentry),

@@ -2410,6 +2423,31 @@ TRACE_EVENT(ext4_es_shrink_exit,
		  __entry->shrunk_nr, __entry->cache_cnt)
);

TRACE_EVENT(ext4_collapse_range,
	TP_PROTO(struct inode *inode, loff_t offset, loff_t len),

	TP_ARGS(inode, offset, len),

	TP_STRUCT__entry(
		__field(dev_t,	dev)
		__field(ino_t,	ino)
		__field(loff_t,	offset)
		__field(loff_t,	len)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->offset	= offset;
		__entry->len	= len;
	),

	TP_printk("dev %d,%d ino %lu offset %lld len %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  __entry->offset, __entry->len)
);

#endif /* _TRACE_EXT4_H */

/* This part must be outside protection */