bcachefs: Regularize argument passing of btree_trans

btree_trans should always be passed when we have one - iter->trans is
disfavoured. This mainly updates old code in btree_update_interior.c,
some of which predates btree_trans.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Author:    Kent Overstreet <kent.overstreet@gmail.com>
Date:      2021-07-10 23:22:06 -04:00
Committer: Kent Overstreet
Parent:    d38494c462
Commit:    e3a67bdb6e
9 changed files with 73 additions and 57 deletions
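
The pattern applied in each hunk below is the same: helpers that used to take a struct bch_fs and reach the transaction through iter->trans now take the struct btree_trans directly and derive the bch_fs from trans->c. A minimal sketch of the before/after convention follows; example_old/example_new and the cut-down struct definitions are illustrative stand-ins, not bcachefs code.

/*
 * Cut-down stand-ins so the sketch is self-contained; the real definitions
 * live in the bcachefs headers.
 */
struct bch_fs;
struct btree_trans { struct bch_fs *c; };
struct btree_iter  { struct btree_trans *trans; };

/* Old convention: bch_fs passed explicitly, trans reached via iter->trans */
static int example_old(struct bch_fs *c, struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;	/* disfavoured */

	(void) c; (void) trans;
	return 0;
}

/* New convention: btree_trans passed explicitly, bch_fs derived from it */
static int example_new(struct btree_trans *trans, struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;

	(void) c; (void) iter;
	return 0;
}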

@@ -800,13 +800,13 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
 		if (!initial) {
 			if (max_stale > 64)
-				bch2_btree_node_rewrite(c, iter,
+				bch2_btree_node_rewrite(&trans, iter,
 						b->data->keys.seq,
 						BTREE_INSERT_NOWAIT|
 						BTREE_INSERT_GC_LOCK_HELD);
 			else if (!bch2_btree_gc_rewrite_disabled &&
 				 (bch2_btree_gc_always_rewrite || max_stale > 16))
-				bch2_btree_node_rewrite(c, iter,
+				bch2_btree_node_rewrite(&trans, iter,
 						b->data->keys.seq,
 						BTREE_INSERT_NOWAIT|
 						BTREE_INSERT_GC_LOCK_HELD);

@@ -420,9 +420,11 @@ void bch2_btree_build_aux_trees(struct btree *b)
  *
  * Returns true if we sorted (i.e. invalidated iterators
  */
-void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
-			  struct btree_iter *iter)
+void bch2_btree_init_next(struct btree_trans *trans,
+			  struct btree_iter *iter,
+			  struct btree *b)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_node_entry *bne;
 	bool reinit_iter = false;
@@ -1563,7 +1565,7 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
 		goto err;
 
-	ret = bch2_btree_node_update_key(c, iter, b, k.k);
+	ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
 	if (ret == -EINTR)
 		goto retry;
 	if (ret)

@@ -138,8 +138,8 @@ void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
 void bch2_btree_node_drop_keys_outside_node(struct btree *);
 
 void bch2_btree_build_aux_trees(struct btree *);
-void bch2_btree_init_next(struct bch_fs *, struct btree *,
-			  struct btree_iter *);
+void bch2_btree_init_next(struct btree_trans *, struct btree_iter *,
+			  struct btree *);
 
 int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
 			      struct btree *, bool);

@@ -8,8 +8,8 @@
 struct bch_fs;
 struct btree;
 
-void bch2_btree_node_lock_for_insert(struct bch_fs *, struct btree *,
-				     struct btree_iter *);
+void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *,
+				     struct btree *);
 bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
 				struct btree_node_iter *, struct bkey_i *);
 void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
@@ -70,10 +70,10 @@ int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
 int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
 			    struct bpos, struct bpos, u64 *);
 
-int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
 			    __le64, unsigned);
 void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
-int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
 			       struct btree *, struct bkey_i *);
 
 int bch2_trans_update(struct btree_trans *, struct btree_iter *,

@@ -22,6 +22,10 @@
 #include <linux/random.h>
 
+static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
+				   struct btree_iter *, struct btree *,
+				   struct keylist *, unsigned);
+
 /* Debug code: */
 
 /*
@@ -1355,8 +1359,9 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
 	btree_node_interior_verify(as->c, b);
 }
 
-static void btree_split(struct btree_update *as, struct btree *b,
-			struct btree_iter *iter, struct keylist *keys,
+static void btree_split(struct btree_update *as,
+			struct btree_trans *trans, struct btree_iter *iter,
+			struct btree *b, struct keylist *keys,
 			unsigned flags)
 {
 	struct bch_fs *c = as->c;
@@ -1422,7 +1427,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 	if (parent) {
 		/* Split a non root node */
-		bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
+		bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
 	} else if (n3) {
 		bch2_btree_set_root(as, n3, iter);
 	} else {
@@ -1460,7 +1465,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 		six_unlock_intent(&n2->c.lock);
 	six_unlock_intent(&n1->c.lock);
 
-	bch2_btree_trans_verify_locks(iter->trans);
+	bch2_btree_trans_verify_locks(trans);
 
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_split],
 			       start_time);
@@ -1494,9 +1499,10 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
  * If a split occurred, this function will return early. This can only happen
  * for leaf nodes -- inserts into interior nodes have to be atomic.
  */
-void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
-			    struct btree_iter *iter, struct keylist *keys,
-			    unsigned flags)
+static void bch2_btree_insert_node(struct btree_update *as,
+				   struct btree_trans *trans, struct btree_iter *iter,
+				   struct btree *b, struct keylist *keys,
+				   unsigned flags)
 {
 	struct bch_fs *c = as->c;
 	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
@@ -1509,7 +1515,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
 	BUG_ON(!as || as->b);
 	bch2_verify_keylist_sorted(keys);
 
-	bch2_btree_node_lock_for_insert(c, b, iter);
+	bch2_btree_node_lock_for_insert(trans, iter, b);
 
 	if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
 		bch2_btree_node_unlock_write(b, iter);
@@ -1537,12 +1543,14 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
 	btree_node_interior_verify(c, b);
 	return;
 split:
-	btree_split(as, b, iter, keys, flags);
+	btree_split(as, trans, iter, b, keys, flags);
 }
 
-int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_split_leaf(struct btree_trans *trans,
+			  struct btree_iter *iter,
 			  unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 	struct btree *b = iter_l(iter)->b;
 	struct btree_update *as;
 	unsigned l;
@@ -1553,22 +1561,22 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
 	if (IS_ERR(as))
 		return PTR_ERR(as);
 
-	btree_split(as, b, iter, NULL, flags);
+	btree_split(as, trans, iter, b, NULL, flags);
 	bch2_btree_update_done(as);
 
 	for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++)
-		ret = bch2_foreground_maybe_merge(c, iter, l, flags);
+		ret = bch2_foreground_maybe_merge(trans, iter, l, flags);
 
 	return ret;
 }
 
-int __bch2_foreground_maybe_merge(struct bch_fs *c,
+int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 				  struct btree_iter *iter,
 				  unsigned level,
 				  unsigned flags,
 				  enum btree_node_sibling sib)
 {
-	struct btree_trans *trans = iter->trans;
 	struct bch_fs *c = trans->c;
 	struct btree_iter *sib_iter = NULL;
 	struct btree_update *as;
 	struct bkey_format_state new_s;
@@ -1697,7 +1705,7 @@ int __bch2_foreground_maybe_merge(struct bch_fs *c,
 	bch2_btree_node_write(c, n, SIX_LOCK_intent);
 
-	bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
+	bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
 
 	bch2_btree_update_get_open_buckets(as, n);
@@ -1750,9 +1758,11 @@ int __bch2_foreground_maybe_merge(struct bch_fs *c,
 /**
  * bch_btree_node_rewrite - Rewrite/move a btree node
  */
-int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_node_rewrite(struct btree_trans *trans,
+			    struct btree_iter *iter,
 			    __le64 seq, unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 	struct btree *b, *n, *parent;
 	struct btree_update *as;
 	int ret;
@@ -1795,7 +1805,8 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
 	if (parent) {
 		bch2_keylist_add(&as->parent_keys, &n->key);
-		bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
+		bch2_btree_insert_node(as, trans, iter, parent,
+				       &as->parent_keys, flags);
 	} else {
 		bch2_btree_set_root(as, n, iter);
 	}
@@ -1834,7 +1845,7 @@ void async_btree_node_rewrite_work(struct work_struct *work)
 	bch2_trans_init(&trans, c, 0, 0);
 	iter = bch2_trans_get_node_iter(&trans, a->btree_id, a->pos,
 					BTREE_MAX_DEPTH, a->level, 0);
-	bch2_btree_node_rewrite(c, iter, a->seq, 0);
+	bch2_btree_node_rewrite(&trans, iter, a->seq, 0);
 	bch2_trans_iter_put(&trans, iter);
 	bch2_trans_exit(&trans);
 	percpu_ref_put(&c->writes);
@@ -1867,12 +1878,13 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 	queue_work(c->btree_interior_update_worker, &a->work);
 }
 
-static void __bch2_btree_node_update_key(struct bch_fs *c,
-					 struct btree_update *as,
+static void __bch2_btree_node_update_key(struct btree_update *as,
+					 struct btree_trans *trans,
 					 struct btree_iter *iter,
 					 struct btree *b, struct btree *new_hash,
 					 struct bkey_i *new_key)
 {
+	struct bch_fs *c = as->c;
 	struct btree *parent;
 	int ret;
@@ -1889,7 +1901,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 		}
 
 		bch2_keylist_add(&as->parent_keys, new_key);
-		bch2_btree_insert_node(as, parent, iter, &as->parent_keys, 0);
+		bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, 0);
 
 		if (new_hash) {
 			mutex_lock(&c->btree_cache.lock);
@@ -1926,10 +1938,12 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	bch2_btree_update_done(as);
 }
 
-int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_node_update_key(struct btree_trans *trans,
+			       struct btree_iter *iter,
 			       struct btree *b,
 			       struct bkey_i *new_key)
 {
+	struct bch_fs *c = trans->c;
 	struct btree *parent = btree_node_parent(iter, b);
 	struct btree_update *as = NULL;
 	struct btree *new_hash = NULL;
@@ -1962,7 +1976,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 		goto err;
 	}
 
-	__bch2_btree_node_update_key(c, as, iter, b, new_hash, new_key);
+	__bch2_btree_node_update_key(as, trans, iter, b, new_hash, new_key);
 
 	bch2_btree_iter_downgrade(iter);
 err:

@@ -131,15 +131,12 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *,
 					       struct btree *);
 void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
 
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
-			    struct btree_iter *, struct keylist *,
-			    unsigned);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
+int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
 
-int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
+int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
 				  unsigned, unsigned, enum btree_node_sibling);
 
-static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
 					struct btree_iter *iter,
 					unsigned level, unsigned flags,
 					enum btree_node_sibling sib)
@@ -153,20 +150,20 @@ static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
 		return 0;
 
 	b = iter->l[level].b;
-	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
+	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
 		return 0;
 
-	return __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+	return __bch2_foreground_maybe_merge(trans, iter, level, flags, sib);
 }
 
-static inline int bch2_foreground_maybe_merge(struct bch_fs *c,
-					      struct btree_iter *iter,
-					      unsigned level,
-					      unsigned flags)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+					      struct btree_iter *iter,
+					      unsigned level,
+					      unsigned flags)
 {
-	return bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+	return bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
 						   btree_prev_sib) ?:
-	       bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+	       bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
 						   btree_next_sib);
 }

@@ -36,9 +36,12 @@ static inline bool same_leaf_as_prev(struct btree_trans *trans,
 		iter_l(i[0].iter)->b == iter_l(i[-1].iter)->b;
 }
 
-inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
-					    struct btree_iter *iter)
+inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
+					    struct btree_iter *iter,
+					    struct btree *b)
 {
+	struct bch_fs *c = trans->c;
+
 	bch2_btree_node_lock_write(b, iter);
 
 	if (btree_iter_type(iter) == BTREE_ITER_CACHED)
@@ -53,7 +56,7 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
 	 * a new bset to insert into:
 	 */
 	if (want_new_bset(c, b))
-		bch2_btree_init_next(c, b, iter);
+		bch2_btree_init_next(trans, iter, b);
 }
 
 /* Inserting into a given leaf node (last stage of insert): */
@@ -518,7 +521,7 @@ static noinline int maybe_do_btree_merge(struct btree_trans *trans, struct btree
 	}
 
 	return u64s_delta <= 0
-		? (bch2_foreground_maybe_merge(trans->c, iter, iter->level,
+		? (bch2_foreground_maybe_merge(trans, iter, iter->level,
 				trans->flags & ~BTREE_INSERT_NOUNLOCK) ?: -EINTR)
 		: 0;
 }
@@ -608,8 +611,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 	trans_for_each_update(trans, i)
 		if (!same_leaf_as_prev(trans, i))
-			bch2_btree_node_lock_for_insert(c,
-					iter_l(i->iter)->b, i->iter);
+			bch2_btree_node_lock_for_insert(trans, i->iter,
+					iter_l(i->iter)->b);
 
 	ret = bch2_trans_commit_write_locked(trans, stopped_at, trace_ip);
@@ -662,7 +665,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	switch (ret) {
 	case BTREE_INSERT_BTREE_NODE_FULL:
-		ret = bch2_btree_split_leaf(c, i->iter, flags);
+		ret = bch2_btree_split_leaf(trans, i->iter, flags);
 
 		/*
 		 * if the split succeeded without dropping locks the insert will

@@ -139,7 +139,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 				break;
 			}
 
-			ret = bch2_btree_node_update_key(c, iter, b, k.k);
+			ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
 			if (ret == -EINTR) {
 				b = bch2_btree_iter_peek_node(iter);
 				ret = 0;

@@ -786,7 +786,7 @@ static int bch2_move_btree(struct bch_fs *c,
 				BUG();
 			}
 
-			ret = bch2_btree_node_rewrite(c, iter,
+			ret = bch2_btree_node_rewrite(&trans, iter,
 					b->data->keys.seq, 0) ?: ret;
 next:
 			bch2_trans_cond_resched(&trans);
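
For code paths that start from a bare bch_fs, the shape after this commit is to set up a btree_trans first and pass it down, as async_btree_node_rewrite_work does above. A rough sketch of that calling sequence follows, assuming the bcachefs internal headers; example_rewrite_node and its parameters are hypothetical, while the bch2_* calls are the ones used in the hunks.

/*
 * Hypothetical caller: set up the transaction, pass it to the helper,
 * then tear it down. Error handling elided.
 */
static void example_rewrite_node(struct bch_fs *c, enum btree_id btree_id,
				 struct bpos pos, unsigned level, __le64 seq)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_node_iter(&trans, btree_id, pos,
					BTREE_MAX_DEPTH, level, 0);

	/* helpers now take the transaction rather than the bch_fs */
	bch2_btree_node_rewrite(&trans, iter, seq, 0);

	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
}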