Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2025-01-01 10:42:11 +00:00
bcachefs: Proper refcounting for journal_keys
The btree iterator code overlays keys from the journal until journal replay is finished; since we're now starting copygc/rebalance etc. before replay is finished, this is multithreaded access and thus needs refcounting.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 8a443d3ea1
parent 63807d9518
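Before the diff, a minimal userspace analogy of the refcounting scheme this commit introduces (C11 stdatomic, hypothetical names, not bcachefs code): the journal keys start with one "initial" reference held by the filesystem, readers pin them with a try-get that fails once the count has already reached zero, and the final put frees the data.

/*
 * Userspace analogy of the refcounting added by this commit (hypothetical
 * names, not bcachefs code).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct keys {
	void		*d;		/* data being protected */
	atomic_int	ref;
	bool		initial_ref_held;
};

/* analogue of atomic_inc_not_zero(): only pin the object while it is live */
static bool keys_tryget(struct keys *k)
{
	int old = atomic_load(&k->ref);

	while (old > 0)
		if (atomic_compare_exchange_weak(&k->ref, &old, old + 1))
			return true;
	return false;
}

/* analogue of bch2_journal_keys_put(): the last reference frees the data */
static void keys_put(struct keys *k)
{
	if (atomic_fetch_sub(&k->ref, 1) == 1) {
		free(k->d);
		k->d = NULL;
	}
}

/* analogue of bch2_journal_keys_put_initial(): drop the owner's ref once */
static void keys_put_initial(struct keys *k)
{
	if (k->initial_ref_held)
		keys_put(k);
	k->initial_ref_held = false;
}

int main(void)
{
	struct keys k = { .d = malloc(16), .initial_ref_held = true };

	atomic_init(&k.ref, 1);		/* owner's initial reference */

	if (keys_tryget(&k))		/* a reader (e.g. a btree_trans) pins the keys */
		keys_put(&k);		/* ...and drops its reference when done */

	keys_put_initial(&k);		/* owner's ref dropped; data freed here */
	return 0;
}

In the diff below, __bch2_trans_get() plays the reader role (the try-get), bch2_trans_put() the matching put, and bch2_fs_alloc() together with bch2_journal_keys_put_initial() the owner's initial reference.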
@@ -638,6 +638,8 @@ struct journal_keys {
 	size_t gap;
 	size_t nr;
 	size_t size;
+	atomic_t ref;
+	bool initial_ref_held;
 };
 
 struct btree_trans_buf {
@@ -2981,7 +2981,8 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->fn_idx = fn_idx;
 	trans->locking_wait.task = current;
 	trans->journal_replay_not_finished =
-		!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
+		unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
+		atomic_inc_not_zero(&c->journal_keys.ref);
 	closure_init_stack(&trans->ref);
 
 	s = btree_trans_stats(trans);
@@ -3098,6 +3099,9 @@ void bch2_trans_put(struct btree_trans *trans)
 		kfree(trans->fs_usage_deltas);
 	}
 
+	if (unlikely(trans->journal_replay_not_finished))
+		bch2_journal_keys_put(c);
+
 	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
 	else
@@ -80,6 +80,8 @@ struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree
 	struct journal_keys *keys = &c->journal_keys;
 	unsigned iters = 0;
 	struct journal_key *k;
+
+	BUG_ON(*idx > keys->nr);
 search:
 	if (!*idx)
 		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);
@@ -189,10 +191,12 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 		/* Since @keys was full, there was no gap: */
 		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
 		kvfree(keys->d);
-		*keys = new_keys;
+		keys->d		= new_keys.d;
+		keys->nr	= new_keys.nr;
+		keys->size	= new_keys.size;
 
 		/* And now the gap is at the end: */
-		keys->gap = keys->nr;
+		keys->gap	= keys->nr;
 	}
 
 	journal_iters_move_gap(c, keys->gap, idx);
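With the refcount now embedded in struct journal_keys, the resize path above can no longer assign the whole struct from the local new_keys, since that would clobber the ref that concurrent readers are using; it copies the storage-describing fields individually instead. A hypothetical sketch of the hazard (not bcachefs code):

/*
 * Hypothetical sketch (not bcachefs code): once a container embeds a live
 * refcount, a whole-struct copy from a local scratch value would silently
 * reset that refcount under concurrent readers, so only the storage fields
 * are copied.
 */
#include <stdatomic.h>
#include <stddef.h>

struct buf {
	int		*d;
	size_t		nr, size, gap;
	atomic_int	ref;		/* must survive a storage swap */
};

static void buf_swap_storage(struct buf *b, int *new_d, size_t new_size)
{
	/* NOT: *b = (struct buf){ .d = new_d, ... };  -- that wipes b->ref */
	b->d	= new_d;
	b->size	= new_size;
	b->gap	= b->nr;		/* new storage was copied gap-free */
}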
@@ -415,10 +419,16 @@ static int journal_sort_key_cmp(const void *_l, const void *_r)
 		cmp_int(l->journal_offset, r->journal_offset);
 }
 
-void bch2_journal_keys_free(struct journal_keys *keys)
+void bch2_journal_keys_put(struct bch_fs *c)
 {
+	struct journal_keys *keys = &c->journal_keys;
 	struct journal_key *i;
 
+	BUG_ON(atomic_read(&keys->ref) <= 0);
+
+	if (!atomic_dec_and_test(&keys->ref))
+		return;
+
 	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
 	keys->gap = keys->nr;
 
@@ -429,6 +439,8 @@ void bch2_journal_keys_free(struct journal_keys *keys)
 	kvfree(keys->d);
 	keys->d = NULL;
 	keys->nr = keys->gap = keys->size = 0;
+
+	bch2_journal_entries_free(c);
 }
 
 static void __journal_keys_sort(struct journal_keys *keys)
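bch2_journal_keys_put() reuses the existing teardown: journal_keys is kept as a gap buffer (the gap/nr/size fields and the move_gap() call above), and the gap is pushed to the end so the live entries are contiguous before they are freed. A minimal userspace sketch of that move-gap-to-end step, assuming a plain array-backed gap buffer with hypothetical names (not the bcachefs move_gap()):

#include <stddef.h>
#include <string.h>

/*
 * Hypothetical sketch: nr live entries are stored in d[0..gap) and
 * d[gap + (size - nr)..size); sliding the tail left closes the gap and
 * leaves the live entries contiguous in d[0..nr).
 */
static void move_gap_to_end(int *d, size_t nr, size_t size, size_t gap)
{
	size_t unused = size - nr;	/* width of the gap */

	memmove(d + gap,		/* destination: start of the old gap */
		d + gap + unused,	/* source: entries stored after the gap */
		(nr - gap) * sizeof(*d));
}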
@@ -49,7 +49,15 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *,
 						struct bch_fs *,
 						struct btree *);
 
-void bch2_journal_keys_free(struct journal_keys *);
+void bch2_journal_keys_put(struct bch_fs *);
+
+static inline void bch2_journal_keys_put_initial(struct bch_fs *c)
+{
+	if (c->journal_keys.initial_ref_held)
+		bch2_journal_keys_put(c);
+	c->journal_keys.initial_ref_held = false;
+}
+
 void bch2_journal_entries_free(struct bch_fs *);
 
 int bch2_journal_keys_sort(struct bch_fs *);
@@ -167,6 +167,8 @@ static int bch2_journal_replay(struct bch_fs *c)
 		goto err;
 	}
 
+	BUG_ON(!atomic_read(&keys->ref));
+
 	for (i = 0; i < keys->nr; i++) {
 		k = keys_sorted[i];
 
@@ -188,6 +190,9 @@ static int bch2_journal_replay(struct bch_fs *c)
 		}
 	}
 
+	if (!c->opts.keep_journal)
+		bch2_journal_keys_put_initial(c);
+
 	replay_now_at(j, j->replay_journal_seq_end);
 	j->replay_journal_seq = 0;
 
@@ -909,10 +914,8 @@ int bch2_fs_recovery(struct bch_fs *c)
 	bch2_flush_fsck_errs(c);
 
 	if (!c->opts.keep_journal &&
-	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
-		bch2_journal_keys_free(&c->journal_keys);
-		bch2_journal_entries_free(c);
-	}
+	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+		bch2_journal_keys_put_initial(c);
 	kfree(clean);
 
 	if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
@@ -508,8 +508,8 @@ static void __bch2_fs_free(struct bch_fs *c)
 	bch2_io_clock_exit(&c->io_clock[WRITE]);
 	bch2_io_clock_exit(&c->io_clock[READ]);
 	bch2_fs_compress_exit(c);
-	bch2_journal_keys_free(&c->journal_keys);
-	bch2_journal_entries_free(c);
+	bch2_journal_keys_put_initial(c);
+	BUG_ON(atomic_read(&c->journal_keys.ref));
 	bch2_fs_btree_write_buffer_exit(c);
 	percpu_free_rwsem(&c->mark_lock);
 	free_percpu(c->online_reserved);
@@ -706,6 +706,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
 	init_rwsem(&c->gc_lock);
 	mutex_init(&c->gc_gens_lock);
+	atomic_set(&c->journal_keys.ref, 1);
+	c->journal_keys.initial_ref_held = true;
 
 	for (i = 0; i < BCH_TIME_STAT_NR; i++)
 		bch2_time_stats_init(&c->times[i]);