Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2025-01-10 07:50:04 +00:00
bcachefs: Kill bch2_verify_bucket_evacuated()
With backpointers, it's now impossible for bch2_evacuate_bucket() to be completely reliable: it can race with an extent being partially overwritten or split, and the resulting backpointer isn't visible until the next write buffer flush. This shouldn't be a real issue in practice; the previous patch added a new tracepoint so we'll be able to see more easily if it is.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5a21764db1
commit 1af5227c1d
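To make the race described above more concrete, here is a stand-alone toy model. This is not bcachefs code; every type, field, and function name below is hypothetical. The idea it sketches: evacuation finds data through the backpointer index, but a backpointer created by a racing overwrite or split sits in the btree write buffer until the next flush, so a single evacuation pass can miss it and an immediate "is the bucket empty?" check reports a spurious failure.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a bucket: 'extents' whose data live in the bucket, of which
 * only 'visible_backpointers' have backpointers flushed to the btree and
 * therefore discoverable by evacuation. */
struct bucket {
	int extents;
	int visible_backpointers;
};

static void concurrent_split(struct bucket *b)
{
	/* A racing split creates a new key pointing into the bucket; its
	 * backpointer is queued in the write buffer, invisible until the
	 * next flush. */
	b->extents++;
}

static void evacuate(struct bucket *b)
{
	/* Move every extent reachable through visible backpointers. */
	b->extents -= b->visible_backpointers;
	b->visible_backpointers = 0;
}

static bool bucket_empty(const struct bucket *b)
{
	return b->extents == 0;
}

int main(void)
{
	struct bucket b = { .extents = 4, .visible_backpointers = 4 };

	concurrent_split(&b);	/* races with the evacuation below */
	evacuate(&b);

	/* Spurious failure: one extent is only reachable via a buffered
	 * backpointer, so this pass missed it. */
	printf("bucket evacuated: %s\n", bucket_empty(&b) ? "yes" : "no");
	return 0;
}

Under this model a later pass, run after the write buffer has been flushed, would see the remaining backpointer and move the data; nothing is actually wrong, which is why the hard verification below is dropped in favour of the tracepoint mentioned in the commit message.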
@@ -654,85 +654,6 @@ int bch2_move_data(struct bch_fs *c,
 	return ret;
 }
 
-void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
-{
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
-	struct bch_backpointer bp;
-	struct bpos bp_pos = POS_MIN;
-	unsigned nr_bps = 0;
-	int ret;
-
-	bch2_trans_begin(trans);
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     bucket, BTREE_ITER_CACHED);
-again:
-	ret = lockrestart_do(trans,
-			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
-
-	if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
-		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
-
-		if (a.v->gen == gen &&
-		    a.v->dirty_sectors) {
-			if (a.v->data_type == BCH_DATA_btree) {
-				bch2_trans_unlock(trans);
-				if (bch2_btree_interior_updates_flush(c))
-					goto again;
-				goto failed_to_evacuate;
-			}
-		}
-	}
-
-	set_btree_iter_dontneed(&iter);
-	bch2_trans_iter_exit(trans, &iter);
-	return;
-failed_to_evacuate:
-	bch2_trans_iter_exit(trans, &iter);
-
-	if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
-		return;
-
-	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
-	bch2_bkey_val_to_text(&buf, c, k);
-
-	while (1) {
-		bch2_trans_begin(trans);
-
-		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_pos, &bp,
-						BTREE_ITER_CACHED);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (bkey_eq(bp_pos, POS_MAX))
-			break;
-
-		k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
-		ret = bkey_err(k);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (!k.k)
-			continue;
-		prt_newline(&buf);
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_iter_exit(trans, &iter);
-
-		if (++nr_bps > 10)
-			break;
-		bp_pos = bpos_nosnap_successor(bp_pos);
-	}
-
-	bch2_print_string_as_lines(KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
-}
-
 int __bch2_evacuate_bucket(struct btree_trans *trans,
 			   struct moving_context *ctxt,
 			   struct move_bucket_in_flight *bucket_in_flight,
@@ -36,8 +36,6 @@ struct moving_context {
 	wait_queue_head_t wait;
 };
 
-void bch2_verify_bucket_evacuated(struct btree_trans *, struct bpos, int);
-
 #define move_ctxt_wait_event(_ctxt, _trans, _cond)			\
 do {									\
 	bool cond_finished = false;					\
@@ -134,13 +134,6 @@ static void move_buckets_wait(struct btree_trans *trans,
 		if (atomic_read(&i->count))
 			break;
 
-		/*
-		 * moving_ctxt_exit calls bch2_write as it flushes pending
-		 * reads, which inits another btree_trans; this one must be
-		 * unlocked:
-		 */
-		bch2_verify_bucket_evacuated(trans, i->bucket.k.bucket, i->bucket.k.gen);
-
 		list->first = i->next;
 		if (!list->first)
 			list->last = NULL;