Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git

commit 8677d02a45
Merge branch 'for-next' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
@@ -455,6 +455,8 @@ static inline void
xfs_attr_free_item(
        struct xfs_attr_intent  *attr)
{
        ASSERT(attr->xattri_leaf_bp == NULL);

        if (attr->xattri_da_state)
                xfs_da_state_free(attr->xattri_da_state);
        xfs_attri_log_nameval_put(attr->xattri_nameval);
@@ -509,6 +511,10 @@ xfs_attr_cancel_item(
        struct xfs_attr_intent  *attr;

        attr = container_of(item, struct xfs_attr_intent, xattri_list);
        if (attr->xattri_leaf_bp) {
                xfs_buf_relse(attr->xattri_leaf_bp);
                attr->xattri_leaf_bp = NULL;
        }
        xfs_attr_free_item(attr);
}

@@ -576,7 +582,7 @@ xfs_attri_item_recover(
        struct xfs_trans_res            tres;
        struct xfs_attri_log_format     *attrp;
        struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
        int                             error, ret = 0;
        int                             error;
        int                             total;
        int                             local;
        struct xfs_attrd_log_item       *done_item = NULL;
@@ -655,13 +661,31 @@ xfs_attri_item_recover(
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        ret = xfs_xattri_finish_update(attr, done_item);
        if (ret == -EAGAIN) {
                /* There's more work to do, so add it to this transaction */
        error = xfs_xattri_finish_update(attr, done_item);
        if (error == -EAGAIN) {
                /*
                 * There's more work to do, so add the intent item to this
                 * transaction so that we can continue it later.
                 */
                xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_ATTR, &attr->xattri_list);
        } else
                error = ret;
                error = xfs_defer_ops_capture_and_commit(tp, capture_list);
                if (error)
                        goto out_unlock;

                /*
                 * The defer capture structure took its own reference to the
                 * attr leaf buffer and will give that to the continuation
                 * transaction. The attr intent struct drives the continuation
                 * work, so release our refcount on the attr leaf buffer but
                 * retain the pointer in the intent structure.
                 */
                if (attr->xattri_leaf_bp)
                        xfs_buf_relse(attr->xattri_leaf_bp);

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_irele(ip);
                return 0;
        }
        if (error) {
                xfs_trans_cancel(tp);
                goto out_unlock;
@@ -670,14 +694,15 @@ xfs_attri_item_recover(
        error = xfs_defer_ops_capture_and_commit(tp, capture_list);

out_unlock:
        if (attr->xattri_leaf_bp)
        if (attr->xattri_leaf_bp) {
                xfs_buf_relse(attr->xattri_leaf_bp);
                attr->xattri_leaf_bp = NULL;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_irele(ip);
out:
        if (ret != -EAGAIN)
                xfs_attr_free_item(attr);
        xfs_attr_free_item(attr);
        return error;
}

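Side note (not part of the diff): xfs_attr_cancel_item() gets back from the embedded xattri_list member to its containing xfs_attr_intent with container_of(). A minimal, self-contained C sketch of that pointer arithmetic, using made-up structure names rather than the XFS types:

#include <stddef.h>
#include <stdio.h>

/*
 * container_of: recover the address of the outer struct from a pointer to
 * one of its members, by backing up the member's offsetof() offset.
 */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_item {
        struct list_item *next;
};

struct intent {                         /* hypothetical stand-in for xfs_attr_intent */
        int id;
        struct list_item list;          /* stand-in for xattri_list */
};

int main(void)
{
        struct intent it = { .id = 42 };
        struct list_item *li = &it.list;        /* only the member is handed around */

        /* Recover the containing intent from the embedded list member. */
        struct intent *recovered = container_of(li, struct intent, list);
        printf("id = %d\n", recovered->id);     /* prints 42 */
        return 0;
}
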
@@ -440,7 +440,7 @@ xfs_inodegc_queue_all(
        for_each_online_cpu(cpu) {
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                if (!llist_empty(&gc->list))
                        queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
                        mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
        }
}

@@ -1841,8 +1841,8 @@ void
xfs_inodegc_worker(
        struct work_struct      *work)
{
        struct xfs_inodegc      *gc = container_of(work, struct xfs_inodegc,
                                                        work);
        struct xfs_inodegc      *gc = container_of(to_delayed_work(work),
                                                struct xfs_inodegc, work);
        struct llist_node       *node = llist_del_all(&gc->list);
        struct xfs_inode        *ip, *n;

@@ -1861,6 +1861,20 @@ xfs_inodegc_worker(
        }
}

/*
 * Expedite all pending inodegc work to run immediately. This does not wait for
 * completion of the work.
 */
void
xfs_inodegc_push(
        struct xfs_mount        *mp)
{
        if (!xfs_is_inodegc_enabled(mp))
                return;
        trace_xfs_inodegc_push(mp, __return_address);
        xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
@@ -1869,12 +1883,8 @@ void
xfs_inodegc_flush(
        struct xfs_mount        *mp)
{
        if (!xfs_is_inodegc_enabled(mp))
                return;

        xfs_inodegc_push(mp);
        trace_xfs_inodegc_flush(mp, __return_address);

        xfs_inodegc_queue_all(mp);
        flush_workqueue(mp->m_inodegc_wq);
}

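Side note (not part of the diff): the new xfs_inodegc_push()/xfs_inodegc_flush() split follows a common workqueue pattern: "push" kicks already-queued delayed work to run immediately without waiting, while "flush" additionally drains the workqueue. A minimal kernel-style sketch of that pattern with invented names (demo_*, not XFS code):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_dwork;

static void demo_worker(struct work_struct *work)
{
        /* ... do the deferred processing ... */
}

static int demo_init(void)
{
        demo_wq = alloc_workqueue("demo_wq", 0, 0);
        if (!demo_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&demo_dwork, demo_worker);
        /* Queue with a grace period; callers may expedite it later. */
        queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(100));
        return 0;
}

/*
 * "push": collapse the remaining delay to 0 so the handler runs as soon as
 * a worker is available, without waiting for it to finish.
 */
static void demo_push(void)
{
        mod_delayed_work(demo_wq, &demo_dwork, 0);
}

/* "flush": expedite and then wait for the whole workqueue to drain. */
static void demo_flush(void)
{
        demo_push();
        flush_workqueue(demo_wq);
}
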
@@ -2014,6 +2024,7 @@ xfs_inodegc_queue(
        struct xfs_inodegc      *gc;
        int                     items;
        unsigned int            shrinker_hits;
        unsigned long           queue_delay = 1;

        trace_xfs_inode_set_need_inactive(ip);
        spin_lock(&ip->i_flags_lock);
@@ -2025,19 +2036,26 @@ xfs_inodegc_queue(
        items = READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, items + 1);
        shrinker_hits = READ_ONCE(gc->shrinker_hits);
        put_cpu_ptr(gc);

        if (!xfs_is_inodegc_enabled(mp))
        /*
         * We queue the work while holding the current CPU so that the work
         * is scheduled to run on this CPU.
         */
        if (!xfs_is_inodegc_enabled(mp)) {
                put_cpu_ptr(gc);
                return;

        if (xfs_inodegc_want_queue_work(ip, items)) {
                trace_xfs_inodegc_queue(mp, __return_address);
                queue_work(mp->m_inodegc_wq, &gc->work);
        }

        if (xfs_inodegc_want_queue_work(ip, items))
                queue_delay = 0;

        trace_xfs_inodegc_queue(mp, __return_address);
        mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
        put_cpu_ptr(gc);

        if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
                trace_xfs_inodegc_throttle(mp, __return_address);
                flush_work(&gc->work);
                flush_delayed_work(&gc->work);
        }
}

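Side note (not part of the diff): in the reworked xfs_inodegc_queue(), put_cpu_ptr() now comes after mod_delayed_work(), so the work is queued while the task is still pinned to the CPU whose per-CPU state it updated, which is how the work ends up running on that same CPU (per the comment added in the hunk above). A minimal kernel-style sketch of the pattern with invented names (not XFS code); the per-CPU area and workqueue are assumed to be set up elsewhere:

#include <linux/percpu.h>
#include <linux/workqueue.h>

struct demo_pcpu {
        unsigned int            items;
        struct delayed_work     work;   /* assumed INIT_DELAYED_WORK'd per CPU */
};

static struct demo_pcpu __percpu *demo_pcp;     /* assumed alloc_percpu()'d */
static struct workqueue_struct *demo_wq;        /* assumed alloc_workqueue()'d */

static void demo_queue(void)
{
        struct demo_pcpu *p;

        /* get_cpu_ptr() disables preemption and returns this CPU's instance. */
        p = get_cpu_ptr(demo_pcp);
        WRITE_ONCE(p->items, READ_ONCE(p->items) + 1);

        /*
         * Queue while still pinned: delayed work queued without an explicit
         * CPU is normally scheduled on the local CPU, so the handler runs
         * next to the per-CPU data it is meant to drain.
         */
        mod_delayed_work(demo_wq, &p->work, 1);

        put_cpu_ptr(demo_pcp);                  /* re-enable preemption */
}
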
@@ -2054,7 +2072,7 @@ xfs_inodegc_cpu_dead(
        unsigned int            count = 0;

        dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
        cancel_work_sync(&dead_gc->work);
        cancel_delayed_work_sync(&dead_gc->work);

        if (llist_empty(&dead_gc->list))
                return;
@@ -2073,12 +2091,12 @@ xfs_inodegc_cpu_dead(
        llist_add_batch(first, last, &gc->list);
        count += READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, count);
        put_cpu_ptr(gc);

        if (xfs_is_inodegc_enabled(mp)) {
                trace_xfs_inodegc_queue(mp, __return_address);
                queue_work(mp->m_inodegc_wq, &gc->work);
                mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
        }
        put_cpu_ptr(gc);
}

/*
@@ -2173,7 +2191,7 @@ xfs_inodegc_shrinker_scan(
                unsigned int    h = READ_ONCE(gc->shrinker_hits);

                WRITE_ONCE(gc->shrinker_hits, h + 1);
                queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
                mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
                no_items = false;
        }
}
@@ -76,6 +76,7 @@ void xfs_blockgc_stop(struct xfs_mount *mp);
void xfs_blockgc_start(struct xfs_mount *mp);

void xfs_inodegc_worker(struct work_struct *work);
void xfs_inodegc_push(struct xfs_mount *mp);
void xfs_inodegc_flush(struct xfs_mount *mp);
void xfs_inodegc_stop(struct xfs_mount *mp);
void xfs_inodegc_start(struct xfs_mount *mp);
@@ -131,6 +131,26 @@ xfs_ilock_attr_map_shared(
        return lock_mode;
}

/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
        uint            lock_flags)
{
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
                (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
                (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock. This routine allows
@@ -168,18 +188,7 @@ xfs_ilock(
{
        trace_xfs_ilock(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        xfs_lock_flags_assert(lock_flags);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                down_write_nested(&VFS_I(ip)->i_rwsem,
@@ -222,18 +231,7 @@ xfs_ilock_nowait(
{
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        xfs_lock_flags_assert(lock_flags);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
@@ -291,19 +289,7 @@ xfs_iunlock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        ASSERT(lock_flags != 0);
        xfs_lock_flags_assert(lock_flags);

        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
@@ -379,8 +365,8 @@ xfs_isilocked(
        }

        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
                return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
                                (lock_flags & XFS_IOLOCK_SHARED));
                return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
                                (lock_flags & XFS_MMAPLOCK_SHARED));
        }

        if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
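Side note (not part of the diff): xfs_lock_flags_assert() centralizes the rule that the SHARED and EXCL variant of a lock class must not be requested together, which the three call sites above previously open-coded. The check itself is just "(flags & (S | X)) != (S | X)". A tiny self-contained C sketch of the same idea with invented flag names:

#include <assert.h>

#define LOCK_A_SHARED   (1u << 0)
#define LOCK_A_EXCL     (1u << 1)
#define LOCK_B_SHARED   (1u << 2)
#define LOCK_B_EXCL     (1u << 3)
#define LOCK_MASK       (LOCK_A_SHARED | LOCK_A_EXCL | LOCK_B_SHARED | LOCK_B_EXCL)

/* One helper instead of repeating the checks at every lock/unlock site. */
static inline void lock_flags_assert(unsigned int flags)
{
        /* shared and exclusive of the same class are mutually exclusive */
        assert((flags & (LOCK_A_SHARED | LOCK_A_EXCL)) !=
               (LOCK_A_SHARED | LOCK_A_EXCL));
        assert((flags & (LOCK_B_SHARED | LOCK_B_EXCL)) !=
               (LOCK_B_SHARED | LOCK_B_EXCL));
        assert((flags & ~LOCK_MASK) == 0);      /* no unknown bits */
        assert(flags != 0);                     /* must ask for something */
}

int main(void)
{
        lock_flags_assert(LOCK_A_EXCL | LOCK_B_SHARED);         /* fine */
        /* lock_flags_assert(LOCK_A_SHARED | LOCK_A_EXCL);         would trip */
        return 0;
}
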
@@ -61,7 +61,7 @@ struct xfs_error_cfg {
 */
struct xfs_inodegc {
        struct llist_head       list;
        struct work_struct      work;
        struct delayed_work     work;

        /* approximate count of inodes in the list */
        unsigned int            items;
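Side note (not part of the diff): once the struct member becomes a delayed_work, the worker callback still receives a work_struct pointer, the one embedded inside the delayed_work, so it has to go through to_delayed_work() before container_of() can find the outer structure; queueing, flushing and cancelling likewise move to the *_delayed_work variants, as the xfs_inodegc_worker() and INIT_DELAYED_WORK() hunks elsewhere in this diff do. A minimal kernel-style sketch with invented names (not XFS code):

#include <linux/container_of.h>
#include <linux/workqueue.h>

struct demo_gc {
        struct delayed_work     work;   /* was: struct work_struct work; */
        unsigned int            items;
};

static void demo_worker(struct work_struct *work)
{
        /*
         * work points at demo_gc::work.work; hop to the delayed_work first,
         * then to the containing demo_gc.
         */
        struct demo_gc *gc = container_of(to_delayed_work(work),
                                          struct demo_gc, work);

        gc->items = 0;          /* ... drain whatever was queued ... */
}

static void demo_setup(struct demo_gc *gc)
{
        gc->items = 0;
        INIT_DELAYED_WORK(&gc->work, demo_worker);      /* was INIT_WORK() */
}

static void demo_teardown(struct demo_gc *gc)
{
        cancel_delayed_work_sync(&gc->work);            /* was cancel_work_sync() */
}
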
@@ -454,9 +454,12 @@ xfs_qm_scall_getquota(
        struct xfs_dquot        *dqp;
        int                     error;

        /* Flush inodegc work at the start of a quota reporting scan. */
        /*
         * Expedite pending inodegc work at the start of a quota reporting
         * scan but don't block waiting for it to complete.
         */
        if (id == 0)
                xfs_inodegc_flush(mp);
                xfs_inodegc_push(mp);

        /*
         * Try to get the dquot. We don't want it allocated on disk, so don't
@@ -498,7 +501,7 @@ xfs_qm_scall_getquota_next(

        /* Flush inodegc work at the start of a quota reporting scan. */
        if (*id == 0)
                xfs_inodegc_flush(mp);
                xfs_inodegc_push(mp);

        error = xfs_qm_dqget_next(mp, *id, type, &dqp);
        if (error)
@@ -797,8 +797,11 @@ xfs_fs_statfs(
        xfs_extlen_t            lsize;
        int64_t                 ffree;

        /* Wait for whatever inactivations are in progress. */
        xfs_inodegc_flush(mp);
        /*
         * Expedite background inodegc but don't wait. We do not want to block
         * here waiting hours for a billion extent file to be truncated.
         */
        xfs_inodegc_push(mp);

        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;
@@ -1074,7 +1077,7 @@ xfs_inodegc_init_percpu(
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                init_llist_head(&gc->list);
                gc->items = 0;
                INIT_WORK(&gc->work, xfs_inodegc_worker);
                INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
        }
        return 0;
}
@@ -240,6 +240,7 @@ DEFINE_EVENT(xfs_fs_class, name, \
        TP_PROTO(struct xfs_mount *mp, void *caller_ip), \
        TP_ARGS(mp, caller_ip))
DEFINE_FS_EVENT(xfs_inodegc_flush);
DEFINE_FS_EVENT(xfs_inodegc_push);
DEFINE_FS_EVENT(xfs_inodegc_start);
DEFINE_FS_EVENT(xfs_inodegc_stop);
DEFINE_FS_EVENT(xfs_inodegc_queue);