bcachefs: Sort & deduplicate updates in bch2_trans_update()

Previously, when doing multiple updates in the same transaction commit
that overwrote each other, we relied on doing the updates in the same
order as the bch2_trans_update() calls in order to get the correct
result. But that wasn't correct for triggers; bch2_trans_mark_update()
when marking overwrites would do the wrong thing because it hadn't seen
the update that was being overwritten.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2019-12-31 19:37:10 -05:00 committed by Kent Overstreet
parent 2d594dfb53
commit 24326cd12a
6 changed files with 159 additions and 164 deletions

View File

@ -1793,10 +1793,9 @@ int bch2_trans_iter_free(struct btree_trans *trans,
static int bch2_trans_realloc_iters(struct btree_trans *trans,
unsigned new_size)
{
void *new_iters, *new_updates, *new_sorted;
void *new_iters, *new_updates;
size_t iters_bytes;
size_t updates_bytes;
size_t sorted_bytes;
new_size = roundup_pow_of_two(new_size);
@ -1811,11 +1810,8 @@ static int bch2_trans_realloc_iters(struct btree_trans *trans,
iters_bytes = sizeof(struct btree_iter) * new_size;
updates_bytes = sizeof(struct btree_insert_entry) * new_size;
sorted_bytes = sizeof(u8) * new_size;
new_iters = kmalloc(iters_bytes +
updates_bytes +
sorted_bytes, GFP_NOFS);
new_iters = kmalloc(iters_bytes + updates_bytes, GFP_NOFS);
if (new_iters)
goto success;
@ -1825,7 +1821,6 @@ static int bch2_trans_realloc_iters(struct btree_trans *trans,
trans->used_mempool = true;
success:
new_updates = new_iters + iters_bytes;
new_sorted = new_updates + updates_bytes;
memcpy(new_iters, trans->iters,
sizeof(struct btree_iter) * trans->nr_iters);
@ -1842,7 +1837,6 @@ success:
trans->iters = new_iters;
trans->updates = new_updates;
trans->updates_sorted = new_sorted;
trans->size = new_size;
if (trans->iters_live) {
@ -1891,6 +1885,7 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
got_slot:
BUG_ON(trans->iters_linked & (1ULL << idx));
trans->iters_linked |= 1ULL << idx;
trans->iters[idx].flags = 0;
return &trans->iters[idx];
}
@ -1906,6 +1901,9 @@ static inline void btree_iter_copy(struct btree_iter *dst,
if (btree_node_locked(dst, i))
six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
}
static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
@ -1956,7 +1954,6 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
iter = best;
}
iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
@ -1968,6 +1965,7 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
BUG_ON(iter->btree_id != btree_id);
BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT);
BUG_ON(trans->iters_live & (1ULL << iter->idx));
trans->iters_live |= 1ULL << iter->idx;
@ -2030,7 +2028,6 @@ struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
* it's cheap to copy it again:
*/
trans->iters_touched &= ~(1ULL << iter->idx);
iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
return iter;
}
@ -2090,7 +2087,8 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
BTREE_ITER_SET_POS_AFTER_COMMIT);
bch2_trans_unlink_iters(trans);
@ -2099,6 +2097,7 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
trans->iters_touched &= trans->iters_live;
trans->need_reset = 0;
trans->nr_updates = 0;
if (flags & TRANS_RESET_MEM)
@ -2127,7 +2126,6 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
trans->updates_sorted = trans->updates_sorted_onstack;
trans->fs_usage_deltas = NULL;
if (expected_nr_iters > trans->size)

View File

@ -197,6 +197,7 @@ enum btree_iter_type {
*/
#define BTREE_ITER_IS_EXTENTS (1 << 6)
#define BTREE_ITER_ERROR (1 << 7)
#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
@ -213,12 +214,13 @@ enum btree_iter_uptodate {
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
u8 idx;
struct btree_trans *trans;
struct bpos pos;
struct bpos pos_after_commit;
u16 flags;
u8 idx;
u8 flags;
enum btree_iter_uptodate uptodate:4;
enum btree_id btree_id:4;
unsigned level:4,
@ -246,6 +248,7 @@ static inline enum btree_iter_type btree_iter_type(struct btree_iter *iter)
struct btree_insert_entry {
unsigned trigger_flags;
unsigned trans_triggers_run:1;
struct bkey_i *k;
struct btree_iter *iter;
};
@ -266,6 +269,7 @@ struct btree_trans {
unsigned used_mempool:1;
unsigned error:1;
unsigned nounlock:1;
unsigned need_reset:1;
unsigned mem_top;
unsigned mem_bytes;
@ -273,7 +277,6 @@ struct btree_trans {
struct btree_iter *iters;
struct btree_insert_entry *updates;
u8 *updates_sorted;
/* update path: */
struct journal_res journal_res;
@ -287,7 +290,6 @@ struct btree_trans {
struct btree_iter iters_onstack[2];
struct btree_insert_entry updates_onstack[2];
u8 updates_sorted_onstack[2];
};
#define BTREE_FLAG(flag) \

View File

@ -72,6 +72,8 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
struct btree *, struct bkey_i_btree_ptr *);
int bch2_trans_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, enum btree_trigger_flags);
int __bch2_trans_commit(struct btree_trans *);
/**
@ -96,19 +98,6 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
return __bch2_trans_commit(trans);
}
static inline void bch2_trans_update(struct btree_trans *trans,
struct btree_iter *iter, struct bkey_i *k,
enum btree_trigger_flags flags)
{
EBUG_ON(trans->nr_updates >= trans->nr_iters);
iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
trans->updates[trans->nr_updates++] = (struct btree_insert_entry) {
.trigger_flags = flags, .iter = iter, .k = k
};
}
#define __bch2_trans_do(_trans, _disk_res, _journal_seq, \
_flags, _reset_flags, _do) \
({ \

View File

@ -21,18 +21,12 @@
#include <linux/sort.h>
static inline bool same_leaf_as_prev(struct btree_trans *trans,
unsigned idx)
struct btree_insert_entry *i)
{
return idx &&
trans->updates[trans->updates_sorted[idx]].iter->l[0].b ==
trans->updates[trans->updates_sorted[idx - 1]].iter->l[0].b;
return i != trans->updates &&
i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
#define trans_for_each_update_sorted(_trans, _i, _iter) \
for (_iter = 0; \
_iter < _trans->nr_updates && \
(_i = _trans->updates + _trans->updates_sorted[_iter], 1); \
_iter++)
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
@ -51,28 +45,6 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
bch2_btree_init_next(c, b, iter);
}
/*
 * Insertion-sort the pending updates by iterator position: fills
 * trans->updates_sorted[] with indices into trans->updates, ordered by
 * btree_iter_cmp(). O(n^2), but the number of updates per transaction
 * is small.
 */
static inline void btree_trans_sort_updates(struct btree_trans *trans)
{
struct btree_insert_entry *l, *r;
unsigned nr = 0, pos;

trans_for_each_update(trans, l) {
/* Find the sorted position for l among the first nr entries: */
for (pos = 0; pos < nr; pos++) {
r = trans->updates + trans->updates_sorted[pos];

if (btree_iter_cmp(l->iter, r->iter) <= 0)
break;
}

/* Shift later indices up by one and insert l's index at pos: */
memmove(&trans->updates_sorted[pos + 1],
&trans->updates_sorted[pos],
(nr - pos) * sizeof(trans->updates_sorted[0]));

trans->updates_sorted[pos] = l - trans->updates;
nr++;
}
}
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
@ -409,7 +381,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
unsigned iter, u64s = 0;
unsigned u64s = 0;
bool marking = false;
int ret;
@ -426,9 +398,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
prefetch(&trans->c->journal.flags);
trans_for_each_update_sorted(trans, i, iter) {
trans_for_each_update(trans, i) {
/* Multiple inserts might go to same leaf: */
if (!same_leaf_as_prev(trans, iter))
if (!same_leaf_as_prev(trans, i))
u64s = 0;
u64s += i->k->k.u64s;
@ -510,7 +482,6 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
{
struct btree_insert_entry *i;
struct btree_iter *iter;
unsigned idx;
int ret;
trans_for_each_update(trans, i)
@ -545,21 +516,15 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
btree_insert_entry_checks(trans, i->iter, i->k);
bch2_btree_trans_verify_locks(trans);
/*
* No more updates can be added - sort updates so we can take write
* locks in the correct order:
*/
btree_trans_sort_updates(trans);
trans_for_each_update_sorted(trans, i, idx)
if (!same_leaf_as_prev(trans, idx))
trans_for_each_update(trans, i)
if (!same_leaf_as_prev(trans, i))
bch2_btree_node_lock_for_insert(trans->c,
i->iter->l[0].b, i->iter);
ret = bch2_trans_commit_write_locked(trans, stopped_at);
trans_for_each_update_sorted(trans, i, idx)
if (!same_leaf_as_prev(trans, idx))
trans_for_each_update(trans, i)
if (!same_leaf_as_prev(trans, i))
bch2_btree_node_unlock_write_inlined(i->iter->l[0].b,
i->iter);
@ -575,8 +540,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
if (trans->flags & BTREE_INSERT_NOUNLOCK)
trans->nounlock = true;
trans_for_each_update_sorted(trans, i, idx)
if (!same_leaf_as_prev(trans, idx))
trans_for_each_update(trans, i)
if (!same_leaf_as_prev(trans, i))
bch2_foreground_maybe_merge(trans->c, i->iter,
0, trans->flags);
@ -708,9 +673,13 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
int __bch2_trans_commit(struct btree_trans *trans)
{
struct btree_insert_entry *i = NULL;
struct btree_iter *iter;
bool trans_trigger_run;
unsigned u64s;
int ret = 0;
BUG_ON(trans->need_reset);
if (!trans->nr_updates)
goto out_noupdates;
@ -730,9 +699,29 @@ int __bch2_trans_commit(struct btree_trans *trans)
}
/*
* note: running triggers will append more updates to the list of
* updates as we're walking it:
* Running triggers will append more updates to the list of updates as
* we're walking it:
*/
do {
trans_trigger_run = false;
trans_for_each_update(trans, i) {
if (iter_has_trans_triggers(i->iter) &&
!i->trans_triggers_run) {
i->trans_triggers_run = true;
trans_trigger_run = true;
ret = bch2_trans_mark_update(trans, i->iter, i->k,
i->trigger_flags);
if (unlikely(ret)) {
if (ret == -EINTR)
trace_trans_restart_mark(trans->ip);
goto out;
}
}
}
} while (trans_trigger_run);
trans_for_each_update(trans, i) {
/* we know trans->nounlock won't be set here: */
if (unlikely(!(i->iter->locks_want < 1
@ -743,16 +732,6 @@ int __bch2_trans_commit(struct btree_trans *trans)
goto out;
}
if (iter_has_trans_triggers(i->iter)) {
ret = bch2_trans_mark_update(trans, i->iter, i->k,
i->trigger_flags);
if (unlikely(ret)) {
if (ret == -EINTR)
trace_trans_restart_mark(trans->ip);
goto out;
}
}
u64s = jset_u64s(i->k->k.u64s);
if (0)
trans->journal_preres_u64s += u64s;
@ -768,6 +747,15 @@ retry:
if (ret)
goto err;
trans_for_each_iter(trans, iter)
if ((trans->iters_live & (1ULL << iter->idx)) &&
(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT)) {
if (trans->flags & BTREE_INSERT_NOUNLOCK)
bch2_btree_iter_set_pos_same_leaf(iter, iter->pos_after_commit);
else
bch2_btree_iter_set_pos(iter, iter->pos_after_commit);
}
out:
bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
@ -785,6 +773,76 @@ err:
goto retry;
}
/*
 * Queue an update to be written out at transaction commit.
 *
 * trans->updates is kept sorted by iterator position; before inserting the
 * new entry, any already-queued updates that the new key overwrites are
 * trimmed (bch2_cut_back/bch2_cut_front) or removed entirely, so that
 * triggers later see a consistent, non-overlapping update list.
 *
 * Returns 0 on success, or the error from bch2_trans_copy_iter() if an
 * iterator had to be cloned and the clone failed (trans->need_reset is set
 * in that case, forcing a transaction restart).
 */
int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_i *k, enum btree_trigger_flags flags)
{
struct btree_insert_entry *i, n = (struct btree_insert_entry) {
.trigger_flags = flags, .iter = iter, .k = k
};

/* The iterator must be positioned at the start of the key being added: */
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&k->k)));

iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;

if (iter->flags & BTREE_ITER_IS_EXTENTS) {
/* After commit, advance this iterator to the end of the new extent: */
iter->pos_after_commit = k->k.p;
iter->flags |= BTREE_ITER_SET_POS_AFTER_COMMIT;
}

/*
 * Pending updates are kept sorted: first, find position of new update:
 */
trans_for_each_update(trans, i)
if (btree_iter_cmp(iter, i->iter) <= 0)
break;

/*
 * Now delete/trim any updates the new update overwrites:
 */
/* Trim the tail of a preceding update that extends past our start: */
if (i > trans->updates &&
i[-1].iter->btree_id == iter->btree_id &&
bkey_cmp(iter->pos, i[-1].k->k.p) < 0)
bch2_cut_back(n.iter->pos, i[-1].k);

/* Drop queued updates wholly contained within the new key: */
while (i < trans->updates + trans->nr_updates &&
iter->btree_id == i->iter->btree_id &&
bkey_cmp(n.k->k.p, i->k->k.p) >= 0)
array_remove_item(trans->updates, trans->nr_updates,
i - trans->updates);

/* Partially-overlapped following update: trim its front. */
if (i < trans->updates + trans->nr_updates &&
iter->btree_id == i->iter->btree_id &&
bkey_cmp(n.k->k.p, i->iter->pos) > 0) {
/*
 * When we have an extent that overwrites the start of another
 * update, trimming that extent will mean the iterator's
 * position has to change since the iterator position has to
 * match the extent's start pos - but we don't want to change
 * the iterator pos if some other code is using it, so we may
 * need to clone it:
 */
if (trans->iters_live & (1ULL << i->iter->idx)) {
i->iter = bch2_trans_copy_iter(trans, i->iter);
if (IS_ERR(i->iter)) {
trans->need_reset = true;
return PTR_ERR(i->iter);
}

i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
bch2_trans_iter_put(trans, i->iter);
}

bch2_cut_front(n.k->k.p, i->k);
bch2_btree_iter_set_pos(i->iter, n.k->k.p);
}

EBUG_ON(trans->nr_updates >= trans->nr_iters);

array_insert_item(trans->updates, trans->nr_updates,
i - trans->updates, n);
return 0;
}
static int __bch2_btree_insert(struct btree_trans *trans,
enum btree_id id, struct bkey_i *k)
{

View File

@ -1433,30 +1433,6 @@ static int trans_get_key(struct btree_trans *trans,
return ret;
}
/*
 * Allocate a zero-initialized key of u64s u64s positioned at iter->pos.
 * If an update for this iterator is already queued, splice the new key
 * into it; otherwise queue a fresh update. Returns the new key, or an
 * ERR_PTR on allocation failure.
 * (NOTE(review): this helper is removed by this commit — callers now
 * allocate with bch2_trans_kmalloc() and call bch2_trans_update()
 * explicitly.)
 */
static void *trans_update_key(struct btree_trans *trans,
struct btree_iter *iter,
unsigned u64s)
{
struct btree_insert_entry *i;
struct bkey_i *new_k;

new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
if (IS_ERR(new_k))
return new_k;

bkey_init(&new_k->k);
new_k->k.p = iter->pos;

/* Reuse an already-queued update for this iterator if there is one: */
trans_for_each_update(trans, i)
if (i->iter == iter) {
i->k = new_k;
return new_k;
}

bch2_trans_update(trans, iter, new_k, 0);
return new_k;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
@ -1540,7 +1516,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
u.data_type = u.dirty_sectors || u.cached_sectors
? data_type : 0;
a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
@ -1548,6 +1524,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
bkey_alloc_init(&a->k_i);
a->k.p = iter->pos;
bch2_alloc_pack(a, u);
bch2_trans_update(trans, iter, &a->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
@ -1562,9 +1539,8 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
struct bkey_i *new_k;
struct bkey_s_c k;
struct bkey_s_stripe s;
struct bkey_i_stripe *s;
int ret = 0;
ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
@ -1579,21 +1555,21 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
goto out;
}
new_k = trans_update_key(trans, iter, k.k->u64s);
ret = PTR_ERR_OR_ZERO(new_k);
s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(s);
if (ret)
goto out;
bkey_reassemble(new_k, k);
s = bkey_i_to_s_stripe(new_k);
bkey_reassemble(&s->k_i, k);
stripe_blockcount_set(s.v, p.block,
stripe_blockcount_get(s.v, p.block) +
stripe_blockcount_set(&s->v, p.block,
stripe_blockcount_get(&s->v, p.block) +
sectors);
*nr_data = s.v->nr_blocks - s.v->nr_redundant;
*nr_parity = s.v->nr_redundant;
bch2_bkey_to_replicas(&r->e, s.s_c);
*nr_data = s->v.nr_blocks - s->v.nr_redundant;
*nr_parity = s->v.nr_redundant;
bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
bch2_trans_update(trans, iter, &s->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
@ -1674,7 +1650,6 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
struct bkey_i *new_k;
struct bkey_s_c k;
struct bkey_i_reflink_v *r_v;
s64 ret;
@ -1700,13 +1675,12 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
new_k = trans_update_key(trans, iter, k.k->u64s);
ret = PTR_ERR_OR_ZERO(new_k);
r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(r_v);
if (ret)
goto err;
bkey_reassemble(new_k, k);
r_v = bkey_i_to_reflink_v(new_k);
bkey_reassemble(&r_v->k_i, k);
le64_add_cpu(&r_v->v.refcount,
!(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
@ -1715,6 +1689,8 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
r_v->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&r_v->k, 0);
}
bch2_trans_update(trans, iter, &r_v->k_i, 0);
out:
ret = k.k->p.offset - idx;
err:

View File

@ -2383,7 +2383,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
struct address_space *mapping = inode->v.i_mapping;
struct bkey_on_stack copy;
struct btree_trans trans;
struct btree_iter *src, *dst, *del = NULL;
struct btree_iter *src, *dst;
loff_t shift, new_size;
u64 src_start;
int ret;
@ -2513,29 +2513,6 @@ reassemble:
next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
/*
* If the new and old keys overlap (because we're moving an
* extent that's bigger than the amount we're collapsing by),
* we need to trim the delete key here so they don't overlap
* because overlaps on insertions aren't handled before
* triggers are run, so the overwrite will get double counted
* by the triggers machinery:
*/
if (insert &&
bkey_cmp(bkey_start_pos(&copy.k->k), delete.k.p) < 0) {
bch2_cut_back(bkey_start_pos(&copy.k->k), &delete);
} else if (!insert &&
bkey_cmp(copy.k->k.p,
bkey_start_pos(&delete.k)) > 0) {
bch2_cut_front(copy.k->k.p, &delete);
del = bch2_trans_copy_iter(&trans, src);
BUG_ON(IS_ERR_OR_NULL(del));
bch2_btree_iter_set_pos(del,
bkey_start_pos(&delete.k));
}
if (copy.k->k.size == k.k->size) {
/*
* If we're moving the entire extent, we can skip
@ -2553,18 +2530,13 @@ reassemble:
BUG_ON(ret);
}
bch2_trans_update(&trans, dst, copy.k, trigger_flags);
bch2_trans_update(&trans, del ?: src, &delete, trigger_flags);
ret = bch2_trans_commit(&trans, &disk_res,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
ret = bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
bch2_trans_commit(&trans, &disk_res,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
bkey_err:
if (del)
bch2_trans_iter_put(&trans, del);
del = NULL;
if (!ret)
bch2_btree_iter_set_pos(src, next_pos);