// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"

#include <linux/iversion.h>

/*
 * Add a locked inode to the transaction.
 *
 * The inode must be locked, and it cannot be associated with any transaction.
 * If lock_flags is non-zero the inode will be unlocked on transaction commit.
 */
void
xfs_trans_ijoin(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	struct xfs_inode_log_item *iip;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, ip->i_mount);
	iip = ip->i_itemp;

	ASSERT(iip->ili_lock_flags == 0);
	iip->ili_lock_flags = lock_flags;
	ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &iip->ili_item);
}
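
/*
 * Illustrative caller sketch, not a verbatim copy of any in-tree caller:
 * a typical transactional inode update allocates a transaction (here the
 * standard inode-change reservation is assumed), locks and joins the inode,
 * logs it, then commits.  Passing a non-zero lock_flags to xfs_trans_ijoin()
 * makes the commit drop the ILOCK for us:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	... modify the inode core ...
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);
 */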

/*
 * Transactional inode timestamp update. Requires the inode to be locked and
 * joined to the transaction supplied. Relies on the transaction subsystem to
 * track dirty state and update/writeback the inode accordingly.
 */
void
xfs_trans_ichgtime(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct timespec64	tv;

	ASSERT(tp);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	tv = current_time(inode);

	if (flags & XFS_ICHGTIME_MOD)
		inode->i_mtime = tv;
	if (flags & XFS_ICHGTIME_CHG)
		inode->i_ctime = tv;
	if (flags & XFS_ICHGTIME_CREATE)
		ip->i_d.di_crtime = tv;
}
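
/*
 * Illustrative note on the flags (a sketch, not tied to a specific caller):
 * a caller updating both the modification and change times would pass
 * XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG, while XFS_ICHGTIME_CREATE additionally
 * stamps the on-disk creation time (di_crtime).
 */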

/*
 * This is called to mark the fields indicated in fieldmask as needing
 * to be logged when the transaction is committed. The inode must
 * already be associated with the given transaction.
 *
 * The values for fieldmask are defined in xfs_inode_item.h. We always
 * log all of the core inode if any of it has changed, and we always log
 * all of the inline data/extents/b-tree root if any of them has changed.
 */
void
xfs_trans_log_inode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		flags)
{
	struct inode	*inode = VFS_I(ip);

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));

	/*
	 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
	 * don't matter - we either will need an extra transaction in 24 hours
	 * to log the timestamps, or will clear already cleared fields in the
	 * worst case.
	 */
	if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
		spin_lock(&inode->i_lock);
		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
		spin_unlock(&inode->i_lock);
	}

	/*
	 * Record the specific change for fdatasync optimisation. This
	 * allows fdatasync to skip log forces for inodes that are only
	 * timestamp dirty. We do this before the change count so that
	 * the core being logged in this case does not impact on fdatasync
	 * behaviour.
	 */
	ip->i_itemp->ili_fsync_fields |= flags;

	/*
	 * First time we log the inode in a transaction, bump the inode change
	 * counter if it is configured for this to occur. While we have the
	 * inode locked exclusively for metadata modification, we can usually
	 * avoid setting XFS_ILOG_CORE if no one has queried the value since
	 * the last time it was incremented. If we have XFS_ILOG_CORE already
	 * set however, then go ahead and bump the i_version counter
	 * unconditionally.
	 */
	if (!test_and_set_bit(XFS_LI_DIRTY, &ip->i_itemp->ili_item.li_flags) &&
	    IS_I_VERSION(VFS_I(ip))) {
		if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE))
			flags |= XFS_ILOG_CORE;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;

	/*
	 * Always OR in the bits from the ili_last_fields field.
	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
	 * routines in the eventual clearing of the ili_fields bits.
	 * See the big comment in xfs_iflush() for an explanation of
	 * this coordination mechanism.
	 */
	flags |= ip->i_itemp->ili_last_fields;
	ip->i_itemp->ili_fields |= flags;
}
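
/*
 * Usage sketch (illustrative; the call site is an assumption, not taken from
 * this file): a timestamp-only update logs just the timestamp field, e.g.
 *
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 *
 * so ili_fsync_fields records only that bit and a later fdatasync() can skip
 * the log force, whereas logging XFS_ILOG_CORE dirties the whole inode core.
 */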
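
/*
 * Roll the transaction with the inode attached: log the inode core, roll to
 * a new transaction, and on success rejoin the still-locked inode to the new
 * transaction.
 */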
int
xfs_trans_roll_inode(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	int			error;

	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
	error = xfs_trans_roll(tpp);
	if (!error)
		xfs_trans_ijoin(*tpp, ip, 0);
	return error;
}