bcachefs: Clean up btree write buffer write ref handling
__bch2_btree_write_buffer_flush() now assumes a write ref is already held (as it is when called from the transaction commit path); the wrappers bch2_btree_write_buffer_flush() and flush_sync() take an explicit write ref.

This means the write buffer code can now always use BTREE_INSERT_NOCHECK_RW internally, instead of passing flags around and hoping the NOCHECK_RW flag was always carried through correctly, as the previous code did.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent cf5bacb6a5
commit 183bcc89b8
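For orientation, the ref-handling pattern this patch adopts can be shown as a minimal standalone C sketch: an inner flush routine that assumes the caller already holds the write ref (so it never re-checks read-write state), and a public wrapper that takes and drops the ref itself. This is illustrative only; fs_try_get_write_ref(), fs_put_write_ref() and the demo_* names are invented stand-ins, not the bcachefs API, and the real code returns -BCH_ERR_erofs_no_writes on failure as seen in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the filesystem write ref (not the bcachefs API). */
static int write_ref_count;

static bool fs_try_get_write_ref(void)
{
	/* Pretend the fs is read-write; a real tryget can fail once writes are disallowed. */
	write_ref_count++;
	return true;
}

static void fs_put_write_ref(void)
{
	write_ref_count--;
}

/*
 * Inner flush: assumes the caller already holds a write ref, so it can
 * always commit with the equivalent of NOCHECK_RW and never needs to
 * have that flag threaded through from its callers.
 */
static int demo_flush_ref_held(void)
{
	printf("flushing write buffer (refs held: %d)\n", write_ref_count);
	return 0;
}

/* Public wrapper: takes an explicit write ref around the inner flush. */
static int demo_flush(void)
{
	if (!fs_try_get_write_ref())
		return -1;	/* the patch returns -BCH_ERR_erofs_no_writes here */

	int ret = demo_flush_ref_held();

	fs_put_write_ref();
	return ret;
}

int main(void)
{
	return demo_flush();
}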
@@ -672,7 +672,8 @@ struct btree_trans_buf {
 	x(invalidate)			\
 	x(delete_dead_snapshots)	\
 	x(snapshot_delete_pagecache)	\
-	x(sysfs)
+	x(sysfs)			\
+	x(btree_write_buffer)
 
 enum bch_write_ref {
 #define x(n) BCH_WRITE_REF_##n,
@@ -961,8 +961,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
 
 	if (wb->state.nr > wb->size * 3 / 4) {
 		bch2_trans_begin(trans);
-		ret = __bch2_btree_write_buffer_flush(trans,
-				flags|BCH_TRANS_COMMIT_no_check_rw, true);
+		ret = __bch2_btree_write_buffer_flush(trans, true);
 		if (!ret) {
 			trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
@@ -1077,8 +1076,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
 		bch2_trans_begin(trans);
 		bch2_trans_unlock(trans);
 
-		ret = __bch2_btree_write_buffer_flush(trans,
-				flags|BCH_TRANS_COMMIT_no_check_rw, true);
+		ret = __bch2_btree_write_buffer_flush(trans, true);
 		if (!ret) {
 			trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
@@ -137,8 +137,7 @@ btree_write_buffered_insert(struct btree_trans *trans,
 	return ret;
 }
 
-int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
-				    bool locked)
+int __bch2_btree_write_buffer_flush(struct btree_trans *trans, bool locked)
 {
 	struct bch_fs *c = trans->c;
 	struct journal *j = &c->journal;
@@ -210,8 +209,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
 		iter.path->preserve = false;
 
 		do {
-			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
-					commit_flags, &write_locked, &fast);
+			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i, 0,
+					&write_locked, &fast);
 			if (!write_locked)
 				bch2_trans_begin(trans);
 		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
@@ -252,9 +251,6 @@ slowpath:
 	     btree_write_buffered_journal_cmp,
 	     NULL);
 
-	commit_flags &= ~BCH_WATERMARK_MASK;
-	commit_flags |= BCH_WATERMARK_reclaim;
-
 	for (i = keys; i < keys + nr; i++) {
 		if (!i->journal_seq)
 			continue;
@@ -263,7 +259,8 @@ slowpath:
 					bch2_btree_write_buffer_journal_flush);
 
 		ret = commit_do(trans, NULL, NULL,
-				commit_flags|
+				BCH_WATERMARK_reclaim|
+				BCH_TRANS_COMMIT_no_check_rw|
 				BCH_TRANS_COMMIT_no_enospc|
 				BCH_TRANS_COMMIT_no_journal_res|
 				BCH_TRANS_COMMIT_journal_reclaim,
@@ -279,16 +276,33 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
 
+	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
+		return -BCH_ERR_erofs_no_writes;
+
 	trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
 
 	bch2_trans_unlock(trans);
-	mutex_lock(&c->btree_write_buffer.flush_lock);
-	return __bch2_btree_write_buffer_flush(trans, 0, true);
+	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
+	int ret = __bch2_btree_write_buffer_flush(trans, true);
+	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
+	return ret;
 }
 
+int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
+{
+	return __bch2_btree_write_buffer_flush(trans, false);
+}
+
 int bch2_btree_write_buffer_flush(struct btree_trans *trans)
 {
-	return __bch2_btree_write_buffer_flush(trans, 0, false);
+	struct bch_fs *c = trans->c;
+
+	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
+		return -BCH_ERR_erofs_no_writes;
+
+	int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
+	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
+	return ret;
 }
 
 static int bch2_btree_write_buffer_journal_flush(struct journal *j,
@@ -300,7 +314,7 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j,
 	mutex_lock(&wb->flush_lock);
 
 	return bch2_trans_run(c,
-			__bch2_btree_write_buffer_flush(trans, BCH_TRANS_COMMIT_no_check_rw, true));
+			__bch2_btree_write_buffer_flush(trans, true));
 }
 
 static inline u64 btree_write_buffer_ref(int idx)
@@ -2,7 +2,8 @@
 #ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
 #define _BCACHEFS_BTREE_WRITE_BUFFER_H
 
-int __bch2_btree_write_buffer_flush(struct btree_trans *, unsigned, bool);
+int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
+int __bch2_btree_write_buffer_flush(struct btree_trans *, bool);
 int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
 int bch2_btree_write_buffer_flush(struct btree_trans *);
 
@@ -1005,7 +1005,7 @@ static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
 	int ret = 0;
 
-	ret = bch2_btree_write_buffer_flush(trans);
+	ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
 	if (ret)
 		goto err;
 
@@ -1162,10 +1162,6 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
 again:
 	need_another_pass = false;
 
-	ret = bch2_btree_write_buffer_flush_sync(trans);
-	if (ret)
-		goto err;
-
 	/*
 	 * Weird transaction restart handling here because on successful delete,
 	 * bch2_inode_rm_snapshot() will return a nested transaction restart,
@@ -1196,8 +1192,12 @@ again:
 	}
 	bch2_trans_iter_exit(trans, &iter);
 
-	if (!ret && need_another_pass)
+	if (!ret && need_another_pass) {
+		ret = bch2_btree_write_buffer_flush_sync(trans);
+		if (ret)
+			goto err;
 		goto again;
+	}
 err:
 	bch2_trans_put(trans);
@@ -154,6 +154,9 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
 	move_buckets_wait(ctxt, buckets_in_flight, false);
 
 	ret = bch2_btree_write_buffer_flush(trans);
+	if (bch2_err_matches(ret, EROFS))
+		return ret;
+
 	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
 				 __func__, bch2_err_str(ret)))
 		return ret;