bcachefs: Plumb btree_trans through btree cache code
Soon, __bch2_btree_node_write() is going to require a btree_trans: zoned device support is going to require a new allocation for every btree node write. This is a bit of prep work.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b1cfe5ed2b
commit 1306f87de3
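The change is mechanical throughout: helpers that used to take a struct bch_fs * now take a struct btree_trans * and recover the filesystem pointer via trans->c. A minimal compilable model of that calling-convention change (the struct definitions below are hypothetical stand-ins, not the real bcachefs types; only the shape of the change is taken from the diff):

#include <stdio.h>

struct bch_fs { const char *name; };
struct btree_trans { struct bch_fs *c; };

/* Before: void helper(struct bch_fs *c, ...);
 * After: the transaction is passed instead, and c is derived from it: */
static void helper(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;	/* the idiom added throughout the diff below */

	printf("operating on fs %s\n", c->name);
}

int main(void)
{
	struct bch_fs fs = { .name = "example" };
	struct btree_trans trans = { .c = &fs };

	helper(&trans);
	return 0;
}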
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
@@ -561,8 +561,9 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
 	}
 }
 
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct list_head *freed = pcpu_read_locks
 		? &bc->freed_pcpu
@@ -673,8 +674,7 @@ err:
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
-						   struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 						   struct btree_path *path,
 						   const struct bkey_i *k,
 						   enum btree_id btree_id,
@@ -682,6 +682,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 						   enum six_lock_type lock_type,
 						   bool sync)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	u32 seq;
@@ -691,14 +692,14 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
	 */
-	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+	if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
 		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
 
-	b = bch2_btree_node_mem_alloc(c, level != 0);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-	if (trans && b == ERR_PTR(-ENOMEM)) {
+	if (b == ERR_PTR(-ENOMEM)) {
 		trans->memory_allocation_failure = true;
 		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
@@ -744,7 +745,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (!sync)
 		return NULL;
 
-	if (trans) {
+	if (path) {
 		int ret = bch2_trans_relock(trans) ?:
 			bch2_btree_path_relock_intent(trans, path);
 		if (ret) {
@@ -754,7 +755,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-		if (trans)
+		if (path)
 			trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
@@ -820,7 +821,7 @@ retry:
 		 * else we could read in a btree node from disk that's been
 		 * freed:
		 */
-		b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+		b = bch2_btree_node_fill(trans, path, k, path->btree_id,
					 level, lock_type, true);
 
 		/* We raced and found the btree node in the cache */
@@ -1029,7 +1030,7 @@ retry:
 		if (nofill)
 			goto out;
 
-		b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+		b = bch2_btree_node_fill(trans, NULL, k, btree_id,
					 level, SIX_LOCK_read, true);
 
 		/* We raced and found the btree node in the cache */
@@ -1089,12 +1090,12 @@ out:
 	return b;
 }
 
-int bch2_btree_node_prefetch(struct bch_fs *c,
-			     struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
 			     struct btree_path *path,
 			     const struct bkey_i *k,
 			     enum btree_id btree_id, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 
@@ -1105,7 +1106,7 @@ int bch2_btree_node_prefetch(struct bch_fs *c,
 	if (b)
 		return 0;
 
-	b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+	b = bch2_btree_node_fill(trans, path, k, btree_id,
				 level, SIX_LOCK_read, false);
 	return PTR_ERR_OR_ZERO(b);
 }
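A subtlety in the bch2_btree_node_fill() hunks above: now that a transaction is always passed in, the guards that read `if (trans && ...)` become `if (path && ...)`. Callers with no btree_path to relock (the fill call in the @@ -1029 hunk passes NULL for path) are signalled by a NULL path instead of a NULL transaction, and the -ENOMEM branch now runs unconditionally, recording trans->memory_allocation_failure and restarting the transaction.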
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
@@ -21,7 +21,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
 int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
 
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *, bool);
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
 
 struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
				  const struct bkey_i *, unsigned,
@@ -30,7 +30,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
 struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
					 enum btree_id, unsigned, bool);
 
-int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *, struct btree_path *,
+int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
			     const struct bkey_i *, enum btree_id, unsigned);
 
 void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
@@ -1610,9 +1610,10 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	}
 }
 
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
-			 const struct bkey_i *k, unsigned level)
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
+				  const struct bkey_i *k, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
@@ -1624,7 +1625,7 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c, level != 0);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
 	bch2_btree_cache_cannibalize_unlock(c);
 
 	BUG_ON(IS_ERR(b));
@@ -1655,6 +1656,13 @@ err:
 	return ret;
 }
 
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+			 const struct bkey_i *k, unsigned level)
+{
+	return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
+}
+
 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
 {
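The __bch2_btree_root_read() split above is the pattern this commit uses whenever an existing entry point has no transaction in scope: the body moves into a double-underscore variant that takes a btree_trans, and the original signature survives as a thin wrapper built on bch2_trans_run(). A reduced, compilable sketch of that wrapper shape (the trans_run macro below is a hypothetical stand-in for the real bch2_trans_run, which also handles transaction init and teardown; it uses a GNU C statement expression, as kernel code commonly does):

#include <stdio.h>

struct bch_fs { int id; };
struct btree_trans { struct bch_fs *c; };

/* Hypothetical stand-in for bch2_trans_run(): declares `trans`, evaluates
 * the body expression with it in scope, and yields the body's result. */
#define trans_run(_c, _do) ({				\
	struct btree_trans trans = { .c = (_c) };	\
	(_do);						\
})

static int __root_read(struct btree_trans *trans, unsigned level)
{
	printf("reading root for fs %d at level %u\n", trans->c->id, level);
	return 0;
}

/* Old public signature preserved; the transaction is plumbed internally. */
static int root_read(struct bch_fs *c, unsigned level)
{
	return trans_run(c, __root_read(&trans, level));
}

int main(void)
{
	struct bch_fs fs = { .id = 1 };

	return root_read(&fs, 0);
}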
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
@@ -815,7 +815,7 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
 			break;
 
 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
-		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
 	}
 
@@ -850,7 +850,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
 			break;
 
 		bch2_bkey_buf_reassemble(&tmp, c, k);
-		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
 	}
 
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
@@ -300,7 +300,7 @@ retry:
 	bch2_open_bucket_get(c, wp, &ob);
 	bch2_alloc_sectors_done(c, wp);
 mem_alloc:
-	b = bch2_btree_node_mem_alloc(c, interior_node);
+	b = bch2_btree_node_mem_alloc(trans, interior_node);
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
 
@@ -2261,7 +2261,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 			return ret;
 		}
 
-		new_hash = bch2_btree_node_mem_alloc(c, false);
+		new_hash = bch2_btree_node_mem_alloc(trans, false);
 	}
 
 	path->intent_ref++;
@@ -2324,8 +2324,9 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
 	bch2_btree_set_root_inmem(c, b);
 }
 
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
@@ -2337,7 +2338,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c, false);
+	b = bch2_btree_node_mem_alloc(trans, false);
 	bch2_btree_cache_cannibalize_unlock(c);
 
 	set_btree_node_fake(b);
@@ -2366,6 +2367,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
+	return 0;
 }
 
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+	bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id));
+}
+
 void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)