xfs: refactor the inode recycling code

Hoist the code in xfs_iget_cache_hit that restores the VFS inode state to an xfs_inode that was previously vfs-destroyed. The next patch will add a new set of state flags, so we need the helper to avoid duplication.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 956f6daa84
commit ff7bebeb91
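
The patch moves the open-coded recycling block out of xfs_iget_cache_hit and into a new helper, xfs_iget_recycle. As a quick orientation, here is a minimal sketch of the resulting call site, condensed from the hunks below (all identifiers, the locking contract, and the out_error label are taken from the patch; surrounding code is elided):

        /* Reached with the RCU read lock and ip->i_flags_lock held. */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                if (flags & XFS_IGET_INCORE) {
                        error = -EAGAIN;
                        goto out_error;
                }

                /* Drops i_flags_lock and RCU read lock. */
                error = xfs_iget_recycle(pag, ip);
                if (error)
                        return error;
        }

The helper carries a __releases(&ip->i_flags_lock) annotation to document that it returns with that lock (and the RCU read lock) dropped.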
fs/xfs/xfs_icache.c
@@ -355,14 +355,14 @@ xfs_reinit_inode(
 	struct xfs_mount	*mp,
 	struct inode		*inode)
 {
-	int		error;
-	uint32_t	nlink = inode->i_nlink;
-	uint32_t	generation = inode->i_generation;
-	uint64_t	version = inode_peek_iversion(inode);
-	umode_t		mode = inode->i_mode;
-	dev_t		dev = inode->i_rdev;
-	kuid_t		uid = inode->i_uid;
-	kgid_t		gid = inode->i_gid;
+	int			error;
+	uint32_t		nlink = inode->i_nlink;
+	uint32_t		generation = inode->i_generation;
+	uint64_t		version = inode_peek_iversion(inode);
+	umode_t			mode = inode->i_mode;
+	dev_t			dev = inode->i_rdev;
+	kuid_t			uid = inode->i_uid;
+	kgid_t			gid = inode->i_gid;
 
 	error = inode_init_always(mp->m_super, inode);
 
@@ -376,6 +376,74 @@ xfs_reinit_inode(
 	return error;
 }
 
+/*
+ * Carefully nudge an inode whose VFS state has been torn down back into a
+ * usable state. Drops the i_flags_lock and the rcu read lock.
+ */
+static int
+xfs_iget_recycle(
+	struct xfs_perag	*pag,
+	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct inode		*inode = VFS_I(ip);
+	int			error;
+
+	trace_xfs_iget_recycle(ip);
+
+	/*
+	 * We need to make it look like the inode is being reclaimed to prevent
+	 * the actual reclaim workers from stomping over us while we recycle
+	 * the inode. We can't clear the radix tree tag yet as it requires
+	 * pag_ici_lock to be held exclusive.
+	 */
+	ip->i_flags |= XFS_IRECLAIM;
+
+	spin_unlock(&ip->i_flags_lock);
+	rcu_read_unlock();
+
+	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+	error = xfs_reinit_inode(mp, inode);
+	if (error) {
+		bool	wake;
+
+		/*
+		 * Re-initializing the inode failed, and we are in deep
+		 * trouble. Try to re-add it to the reclaim list.
+		 */
+		rcu_read_lock();
+		spin_lock(&ip->i_flags_lock);
+		wake = !!__xfs_iflags_test(ip, XFS_INEW);
+		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+		if (wake)
+			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
+		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
+		spin_unlock(&ip->i_flags_lock);
+		rcu_read_unlock();
+
+		trace_xfs_iget_recycle_fail(ip);
+		return error;
+	}
+
+	spin_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+
+	/*
+	 * Clear the per-lifetime state in the inode as we are now effectively
+	 * a new inode and need to return to the initial state before reuse
+	 * occurs.
+	 */
+	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
+	ip->i_flags |= XFS_INEW;
+	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+			XFS_ICI_RECLAIM_TAG);
+	inode->i_state = I_NEW;
+	spin_unlock(&ip->i_flags_lock);
+	spin_unlock(&pag->pag_ici_lock);
+
+	return 0;
+}
+
 /*
  * If we are allocating a new inode, then check what was returned is
  * actually a free, empty inode. If we are not allocating an inode,
@@ -450,7 +518,7 @@ xfs_iget_cache_hit(
 	/*
 	 * If we are racing with another cache hit that is currently
 	 * instantiating this inode or currently recycling it out of
-	 * reclaimabe state, wait for the initialisation to complete
+	 * reclaimable state, wait for the initialisation to complete
 	 * before continuing.
 	 *
 	 * XXX(hch): eventually we should do something equivalent to
@@ -472,64 +540,16 @@ xfs_iget_cache_hit(
 	if (error)
 		goto out_error;
 
-	/*
-	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
-	 * Need to carefully get it back into useable state.
-	 */
 	if (ip->i_flags & XFS_IRECLAIMABLE) {
-		trace_xfs_iget_reclaim(ip);
-
 		if (flags & XFS_IGET_INCORE) {
 			error = -EAGAIN;
 			goto out_error;
 		}
 
-		/*
-		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
-		 * from stomping over us while we recycle the inode. We can't
-		 * clear the radix tree reclaimable tag yet as it requires
-		 * pag_ici_lock to be held exclusive.
-		 */
-		ip->i_flags |= XFS_IRECLAIM;
-
-		spin_unlock(&ip->i_flags_lock);
-		rcu_read_unlock();
-
-		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
-		error = xfs_reinit_inode(mp, inode);
-		if (error) {
-			bool wake;
-			/*
-			 * Re-initializing the inode failed, and we are in deep
-			 * trouble. Try to re-add it to the reclaim list.
-			 */
-			rcu_read_lock();
-			spin_lock(&ip->i_flags_lock);
-			wake = !!__xfs_iflags_test(ip, XFS_INEW);
-			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
-			if (wake)
-				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
-			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
-			trace_xfs_iget_reclaim_fail(ip);
-			goto out_error;
-		}
-
-		spin_lock(&pag->pag_ici_lock);
-		spin_lock(&ip->i_flags_lock);
-
-		/*
-		 * Clear the per-lifetime state in the inode as we are now
-		 * effectively a new inode and need to return to the initial
-		 * state before reuse occurs.
-		 */
-		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
-		ip->i_flags |= XFS_INEW;
-		xfs_perag_clear_inode_tag(pag,
-				XFS_INO_TO_AGINO(pag->pag_mount, ino),
-				XFS_ICI_RECLAIM_TAG);
-		inode->i_state = I_NEW;
-		spin_unlock(&ip->i_flags_lock);
-		spin_unlock(&pag->pag_ici_lock);
+		/* Drops i_flags_lock and RCU read lock. */
+		error = xfs_iget_recycle(pag, ip);
+		if (error)
+			return error;
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
@@ -559,7 +579,6 @@ xfs_iget_cache_hit(
 	return error;
 }
 
-
 static int
 xfs_iget_cache_miss(
 	struct xfs_mount	*mp,
fs/xfs/xfs_trace.h
@@ -631,8 +631,8 @@ DEFINE_EVENT(xfs_inode_class, name, \
 	TP_PROTO(struct xfs_inode *ip), \
 	TP_ARGS(ip))
 DEFINE_INODE_EVENT(xfs_iget_skip);
-DEFINE_INODE_EVENT(xfs_iget_reclaim);
-DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
+DEFINE_INODE_EVENT(xfs_iget_recycle);
+DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
 DEFINE_INODE_EVENT(xfs_iget_hit);
 DEFINE_INODE_EVENT(xfs_iget_miss);
 