bcachefs: lock time stats prep work.

We need the caller name and a place to store our results; btree_trans provides this.

Signed-off-by: Daniel Hill <daniel@gluo.nz>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Daniel Hill 2022-07-14 18:58:23 +12:00 committed by Kent Overstreet
parent 43de721a33
commit 8bfe14e86a
6 changed files with 43 additions and 38 deletions

View File

@ -883,7 +883,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
* was removed - and we'll bail out: * was removed - and we'll bail out:
*/ */
if (btree_node_read_locked(path, level + 1)) if (btree_node_read_locked(path, level + 1))
btree_node_unlock(path, level + 1); btree_node_unlock(trans, path, level + 1);
if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type, if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip)) { lock_node_check_fn, (void *) k, trace_ip)) {

View File

@ -224,7 +224,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
if (btree_node_lock_seq_matches(path, b, level) && if (btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) { btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(path, level); btree_node_unlock(trans, path, level);
goto success; goto success;
} }
@ -259,7 +259,7 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
* the node that we failed to relock: * the node that we failed to relock:
*/ */
if (fail_idx >= 0) { if (fail_idx >= 0) {
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
do { do {
@ -417,7 +417,7 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans,
l < path->locks_want && btree_path_node(path, l); l < path->locks_want && btree_path_node(path, l);
l++) { l++) {
if (!bch2_btree_node_relock(trans, path, l)) { if (!bch2_btree_node_relock(trans, path, l)) {
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_, trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
path->btree_id, &path->pos); path->btree_id, &path->pos);
@ -496,7 +496,8 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
return false; return false;
} }
void __bch2_btree_path_downgrade(struct btree_path *path, void __bch2_btree_path_downgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want) unsigned new_locks_want)
{ {
unsigned l; unsigned l;
@ -508,7 +509,7 @@ void __bch2_btree_path_downgrade(struct btree_path *path,
while (path->nodes_locked && while (path->nodes_locked &&
(l = __fls(path->nodes_locked)) >= path->locks_want) { (l = __fls(path->nodes_locked)) >= path->locks_want) {
if (l > path->level) { if (l > path->level) {
btree_node_unlock(path, l); btree_node_unlock(trans, path, l);
} else { } else {
if (btree_node_intent_locked(path, l)) { if (btree_node_intent_locked(path, l)) {
six_lock_downgrade(&path->l[l].b->c.lock); six_lock_downgrade(&path->l[l].b->c.lock);
@ -526,7 +527,7 @@ void bch2_trans_downgrade(struct btree_trans *trans)
struct btree_path *path; struct btree_path *path;
trans_for_each_path(trans, path) trans_for_each_path(trans, path)
bch2_btree_path_downgrade(path); bch2_btree_path_downgrade(trans, path);
} }
/* Btree transaction locking: */ /* Btree transaction locking: */
@ -554,7 +555,7 @@ void bch2_trans_unlock(struct btree_trans *trans)
struct btree_path *path; struct btree_path *path;
trans_for_each_path(trans, path) trans_for_each_path(trans, path)
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
} }
/* Btree iterator: */ /* Btree iterator: */
@ -575,7 +576,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
bkey_cmp(ck->key.pos, path->pos)); bkey_cmp(ck->key.pos, path->pos));
if (!locked) if (!locked)
btree_node_unlock(path, 0); btree_node_unlock(trans, path, 0);
} }
static void bch2_btree_path_verify_level(struct btree_trans *trans, static void bch2_btree_path_verify_level(struct btree_trans *trans,
@ -632,7 +633,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
} }
if (!locked) if (!locked)
btree_node_unlock(path, level); btree_node_unlock(trans, path, level);
return; return;
err: err:
bch2_bpos_to_text(&buf1, path->pos); bch2_bpos_to_text(&buf1, path->pos);
@ -1106,7 +1107,7 @@ static void btree_path_verify_new_node(struct btree_trans *trans,
} }
if (!parent_locked) if (!parent_locked)
btree_node_unlock(path, plevel); btree_node_unlock(trans, path, plevel);
} }
static inline void __btree_path_level_init(struct btree_path *path, static inline void __btree_path_level_init(struct btree_path *path,
@ -1158,7 +1159,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
if (path->nodes_locked && if (path->nodes_locked &&
t != BTREE_NODE_UNLOCKED) { t != BTREE_NODE_UNLOCKED) {
btree_node_unlock(path, b->c.level); btree_node_unlock(trans, path, b->c.level);
six_lock_increment(&b->c.lock, (enum six_lock_type) t); six_lock_increment(&b->c.lock, (enum six_lock_type) t);
mark_btree_node_locked(path, b->c.level, (enum six_lock_type) t); mark_btree_node_locked(path, b->c.level, (enum six_lock_type) t);
} }
@ -1277,7 +1278,7 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
} }
if (!was_locked) if (!was_locked)
btree_node_unlock(path, path->level); btree_node_unlock(trans, path, path->level);
bch2_bkey_buf_exit(&tmp, c); bch2_bkey_buf_exit(&tmp, c);
return ret; return ret;
@ -1312,7 +1313,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
} }
if (!was_locked) if (!was_locked)
btree_node_unlock(path, path->level); btree_node_unlock(trans, path, path->level);
bch2_bkey_buf_exit(&tmp, c); bch2_bkey_buf_exit(&tmp, c);
return ret; return ret;
@ -1337,7 +1338,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
bp->mem_ptr = (unsigned long)b; bp->mem_ptr = (unsigned long)b;
if (!locked) if (!locked)
btree_node_unlock(path, plevel); btree_node_unlock(trans, path, plevel);
} }
static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
@ -1410,7 +1411,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
btree_node_mem_ptr_set(trans, path, level + 1, b); btree_node_mem_ptr_set(trans, path, level + 1, b);
if (btree_node_read_locked(path, level + 1)) if (btree_node_read_locked(path, level + 1))
btree_node_unlock(path, level + 1); btree_node_unlock(trans, path, level + 1);
path->level = level; path->level = level;
bch2_btree_path_verify_locks(path); bch2_btree_path_verify_locks(path);
@ -1519,9 +1520,10 @@ static inline bool btree_path_good_node(struct btree_trans *trans,
return true; return true;
} }
static void btree_path_set_level_up(struct btree_path *path) static void btree_path_set_level_up(struct btree_trans *trans,
struct btree_path *path)
{ {
btree_node_unlock(path, path->level); btree_node_unlock(trans, path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP; path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++; path->level++;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@ -1537,7 +1539,7 @@ static void btree_path_set_level_down(struct btree_trans *trans,
for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++) for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED) if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
btree_node_unlock(path, l); btree_node_unlock(trans, path, l);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
bch2_btree_path_verify(trans, path); bch2_btree_path_verify(trans, path);
@ -1551,7 +1553,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
while (btree_path_node(path, l) && while (btree_path_node(path, l) &&
!btree_path_good_node(trans, path, l, check_pos)) { !btree_path_good_node(trans, path, l, check_pos)) {
btree_node_unlock(path, l); btree_node_unlock(trans, path, l);
path->l[l].b = BTREE_ITER_NO_NODE_UP; path->l[l].b = BTREE_ITER_NO_NODE_UP;
l++; l++;
} }
@ -1562,7 +1564,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
i++) i++)
if (!bch2_btree_node_relock(trans, path, i)) if (!bch2_btree_node_relock(trans, path, i))
while (l <= i) { while (l <= i) {
btree_node_unlock(path, l); btree_node_unlock(trans, path, l);
path->l[l].b = BTREE_ITER_NO_NODE_UP; path->l[l].b = BTREE_ITER_NO_NODE_UP;
l++; l++;
} }
@ -1631,7 +1633,7 @@ static int btree_path_traverse_one(struct btree_trans *trans,
goto out; goto out;
} }
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
path->level = depth_want; path->level = depth_want;
if (ret == -EIO) if (ret == -EIO)
@ -1717,7 +1719,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
trans->paths_sorted = false; trans->paths_sorted = false;
if (unlikely(path->cached)) { if (unlikely(path->cached)) {
btree_node_unlock(path, 0); btree_node_unlock(trans, path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_CACHED; path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
goto out; goto out;
@ -1740,7 +1742,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
if (l != path->level) { if (l != path->level) {
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
} }
out: out:
bch2_btree_path_verify(trans, path); bch2_btree_path_verify(trans, path);
@ -1781,7 +1783,7 @@ static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btr
static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path) static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
{ {
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
btree_path_list_remove(trans, path); btree_path_list_remove(trans, path);
trans->paths_allocated &= ~(1ULL << path->idx); trans->paths_allocated &= ~(1ULL << path->idx);
} }
@ -2122,12 +2124,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
/* got to end? */ /* got to end? */
if (!btree_path_node(path, path->level + 1)) { if (!btree_path_node(path, path->level + 1)) {
btree_path_set_level_up(path); btree_path_set_level_up(trans, path);
return NULL; return NULL;
} }
if (!bch2_btree_node_relock(trans, path, path->level + 1)) { if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
__bch2_btree_path_unlock(path); __bch2_btree_path_unlock(trans, path);
path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS; path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS; path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@ -2141,7 +2143,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
b = btree_path_node(path, path->level + 1); b = btree_path_node(path, path->level + 1);
if (!bpos_cmp(iter->pos, b->key.k.p)) { if (!bpos_cmp(iter->pos, b->key.k.p)) {
btree_node_unlock(path, path->level); btree_node_unlock(trans, path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP; path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++; path->level++;
} else { } else {
@ -2582,7 +2584,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
(iter->advanced && (iter->advanced &&
!bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) { !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
iter->pos = path_l(iter->path)->b->key.k.p; iter->pos = path_l(iter->path)->b->key.k.p;
btree_path_set_level_up(iter->path); btree_path_set_level_up(trans, iter->path);
iter->advanced = false; iter->advanced = false;
continue; continue;
} }

View File

@ -228,14 +228,15 @@ static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
: path->uptodate == BTREE_ITER_UPTODATE; : path->uptodate == BTREE_ITER_UPTODATE;
} }
void __bch2_btree_path_downgrade(struct btree_path *, unsigned); void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
static inline void bch2_btree_path_downgrade(struct btree_path *path) static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
struct btree_path *path)
{ {
unsigned new_locks_want = path->level + !!path->intent_ref; unsigned new_locks_want = path->level + !!path->intent_ref;
if (path->locks_want > new_locks_want) if (path->locks_want > new_locks_want)
__bch2_btree_path_downgrade(path, new_locks_want); __bch2_btree_path_downgrade(trans, path, new_locks_want);
} }
void bch2_trans_downgrade(struct btree_trans *); void bch2_trans_downgrade(struct btree_trans *);

View File

@ -431,7 +431,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
return ret; return ret;
err: err:
if (ret != -EINTR) { if (ret != -EINTR) {
btree_node_unlock(path, 0); btree_node_unlock(trans, path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_ERROR; path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
} }
return ret; return ret;

View File

@ -94,7 +94,8 @@ btree_lock_want(struct btree_path *path, int level)
return BTREE_NODE_UNLOCKED; return BTREE_NODE_UNLOCKED;
} }
static inline void btree_node_unlock(struct btree_path *path, unsigned level) static inline void btree_node_unlock(struct btree_trans *trans,
struct btree_path *path, unsigned level)
{ {
int lock_type = btree_node_locked_type(path, level); int lock_type = btree_node_locked_type(path, level);
@ -105,12 +106,13 @@ static inline void btree_node_unlock(struct btree_path *path, unsigned level)
mark_btree_node_unlocked(path, level); mark_btree_node_unlocked(path, level);
} }
static inline void __bch2_btree_path_unlock(struct btree_path *path) static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
struct btree_path *path)
{ {
btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK); btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
while (path->nodes_locked) while (path->nodes_locked)
btree_node_unlock(path, __ffs(path->nodes_locked)); btree_node_unlock(trans, path, __ffs(path->nodes_locked));
} }
static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type) static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)

View File

@ -1844,7 +1844,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_update_done(as); bch2_btree_update_done(as);
out: out:
bch2_btree_path_downgrade(iter->path); bch2_btree_path_downgrade(trans, iter->path);
return ret; return ret;
} }
@ -1956,7 +1956,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
BUG_ON(iter2.path->level != b->c.level); BUG_ON(iter2.path->level != b->c.level);
BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p)); BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
btree_node_unlock(iter2.path, iter2.path->level); btree_node_unlock(trans, iter2.path, iter2.path->level);
path_l(iter2.path)->b = BTREE_ITER_NO_NODE_UP; path_l(iter2.path)->b = BTREE_ITER_NO_NODE_UP;
iter2.path->level++; iter2.path->level++;
btree_path_set_dirty(iter2.path, BTREE_ITER_NEED_TRAVERSE); btree_path_set_dirty(iter2.path, BTREE_ITER_NEED_TRAVERSE);