mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
gfs2: Evict inodes cooperatively
Add a gfs2_evict_inodes() helper that evicts inodes cooperatively across the
cluster. This avoids running into timeouts during unmount unnecessarily.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
This commit is contained in:
parent 6b388abc33
commit b88beb9a24
fs/gfs2/incore.h
@@ -607,6 +607,7 @@ enum {
 	SDF_WITHDRAW_RECOVERY	= 14, /* Wait for journal recovery when we are
 					 withdrawing */
 	SDF_DEACTIVATING	= 15,
+	SDF_EVICTING		= 16,
 };
 
 enum gfs2_freeze_state {
fs/gfs2/ops_fstype.c
@@ -1728,6 +1728,55 @@ static int gfs2_meta_init_fs_context(struct fs_context *fc)
 	return 0;
 }
 
+/**
+ * gfs2_evict_inodes - evict inodes cooperatively
+ * @sb: the superblock
+ *
+ * When evicting an inode with a zero link count, we are trying to upgrade the
+ * inode's iopen glock from SH to EX mode in order to determine if we can
+ * delete the inode. The other nodes are supposed to evict the inode from
+ * their caches if they can, and to poke the inode's inode glock if they cannot
+ * do so. Either behavior allows gfs2_upgrade_iopen_glock() to proceed
+ * quickly, but if the other nodes are not cooperating, the lock upgrading
+ * attempt will time out. Since inodes are evicted sequentially, this can add
+ * up quickly.
+ *
+ * Function evict_inodes() tries to keep the s_inode_list_lock list locked over
+ * a long time, which prevents other inodes from being evicted concurrently.
+ * This precludes the cooperative behavior we are looking for. This special
+ * version of evict_inodes() avoids that.
+ *
+ * Modeled after drop_pagecache_sb().
+ */
+static void gfs2_evict_inodes(struct super_block *sb)
+{
+	struct inode *inode, *toput_inode = NULL;
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+
+	set_bit(SDF_EVICTING, &sdp->sd_flags);
+
+	spin_lock(&sb->s_inode_list_lock);
+	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+		spin_lock(&inode->i_lock);
+		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) &&
+		    !need_resched()) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		atomic_inc(&inode->i_count);
+		spin_unlock(&inode->i_lock);
+		spin_unlock(&sb->s_inode_list_lock);
+
+		iput(toput_inode);
+		toput_inode = inode;
+
+		cond_resched();
+		spin_lock(&sb->s_inode_list_lock);
+	}
+	spin_unlock(&sb->s_inode_list_lock);
+	iput(toput_inode);
+}
+
 static void gfs2_kill_sb(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
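The function above reuses the walk-and-pin idiom from drop_pagecache_sb(): take a reference on the current inode, drop s_inode_list_lock so that concurrent evictions (the cooperation the comment asks for) can make progress, and release the previous inode only while the list lock is not held. What follows is a minimal userspace C sketch of that idiom, not kernel code: all names (fake_inode, fake_iput(), list_lock) are invented for illustration, a mutex stands in for the spinlocks, and unlinking evicted nodes from the list is omitted for brevity.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_inode {
	atomic_int i_count;       /* reference count, like inode->i_count */
	struct fake_inode *next;  /* stand-in for the sb->s_inodes list */
	int id;
};

/* Stand-in for s_inode_list_lock. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_inode *inode_list;

/* Drop a reference; the last reference triggers "eviction". */
static void fake_iput(struct fake_inode *inode)
{
	if (inode && atomic_fetch_sub(&inode->i_count, 1) == 1) {
		printf("inode %d evicted\n", inode->id);
		free(inode);
	}
}

static void evict_all(void)
{
	struct fake_inode *inode, *toput = NULL;

	pthread_mutex_lock(&list_lock);
	for (inode = inode_list; inode; inode = inode->next) {
		/* Pin the current inode so it stays valid, then drop the
		 * list lock so other threads can walk the list meanwhile. */
		atomic_fetch_add(&inode->i_count, 1);
		pthread_mutex_unlock(&list_lock);

		/* Release the previous inode; this (potential eviction
		 * work) never runs under list_lock. */
		fake_iput(toput);
		toput = inode;

		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	fake_iput(toput);	/* release the final pinned inode */
}

int main(void)
{
	/* Build a short list; a cached-but-unused inode holds no
	 * references of its own, so i_count starts at zero. */
	for (int i = 3; i >= 1; i--) {
		struct fake_inode *n = calloc(1, sizeof(*n));
		n->id = i;
		atomic_init(&n->i_count, 0);
		n->next = inode_list;
		inode_list = n;
	}
	evict_all();
	return 0;
}

Compiled with cc -pthread, this prints one "evicted" line per inode: each pin is the only reference, so releasing it after the lock is dropped models exactly what iput() does here once ->drop_inode starts returning 1 during unmount.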
@@ -1744,6 +1793,8 @@ static void gfs2_kill_sb(struct super_block *sb)
 	sdp->sd_master_dir = NULL;
 	shrink_dcache_sb(sb);
 
+	gfs2_evict_inodes(sb);
+
 	/*
 	 * Flush and then drain the delete workqueue here (via
 	 * destroy_workqueue()) to ensure that any delete work that
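The call site matters: shrink_dcache_sb() runs first so that dentries no longer hold inode references, which is what lets gfs2_evict_inodes() actually push the remaining inodes out of the inode cache before the delete workqueue is drained.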
fs/gfs2/super.c
@@ -935,6 +935,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int gfs2_drop_inode(struct inode *inode)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
 
 	if (inode->i_nlink &&
 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
@@ -959,6 +960,12 @@ static int gfs2_drop_inode(struct inode *inode)
 		return 0;
 	}
 
+	/*
+	 * No longer cache inodes when trying to evict them all.
+	 */
+	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
+		return 1;
+
 	return generic_drop_inode(inode);
 }
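For context on this last hunk: ->drop_inode is consulted from iput_final() when the last reference to an inode goes away, and a nonzero return tells the VFS to evict the inode immediately rather than keep it cached. Below is a toy model of just that decision, not the real function (which also has the iopen glock handling and other checks); all toy_* names are invented, and toy_generic_drop_inode() condenses generic_drop_inode()'s unlinked-or-unhashed test.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; the real code uses struct inode and sdp->sd_flags bits. */
struct toy_inode {
	int i_nlink;
	bool unhashed;
	bool sb_evicting;	/* models test_bit(SDF_EVICTING, ...) */
};

/* Simplified generic_drop_inode(): drop if unlinked or unhashed. */
static int toy_generic_drop_inode(const struct toy_inode *inode)
{
	return inode->i_nlink == 0 || inode->unhashed;
}

/* Models the decision gfs2_drop_inode() makes after this patch: once the
 * filesystem is tearing down (SDF_EVICTING), never keep inodes cached;
 * otherwise fall back to the generic policy. */
static int toy_gfs2_drop_inode(const struct toy_inode *inode)
{
	if (inode->sb_evicting)
		return 1;
	return toy_generic_drop_inode(inode);
}

int main(void)
{
	struct toy_inode normal     = { .i_nlink = 1 };
	struct toy_inode unlinked   = { .i_nlink = 0 };
	struct toy_inode unmounting = { .i_nlink = 1, .sb_evicting = true };

	printf("normal:     drop=%d (kept in cache)\n",
	       toy_gfs2_drop_inode(&normal));
	printf("unlinked:   drop=%d (evicted on last iput)\n",
	       toy_gfs2_drop_inode(&unlinked));
	printf("unmounting: drop=%d (evicted on last iput)\n",
	       toy_gfs2_drop_inode(&unmounting));
	return 0;
}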