bcachefs: bch2_btree_node_relock_notrace()

Most of the node_relock_fail trace events are generated from
bch2_btree_path_verify_level(), when debug_check_iterators is enabled -
but we're not interested in these trace events; they don't indicate that
we're in a slowpath.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2022-09-25 16:42:53 -04:00
parent c36ff038fd
commit e9174370d0
3 changed files with 19 additions and 5 deletions
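
In short: __bch2_btree_node_relock() gains a `trace` argument, the existing
bch2_btree_node_relock() wrapper passes true to preserve current behavior, and
a new bch2_btree_node_relock_notrace() wrapper passes false so the debug
verify path can relock without emitting btree_path_relock_fail events.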

fs/bcachefs/btree_iter.c

@@ -167,7 +167,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
 	if (!btree_path_node(path, level))
 		return;
 
-	if (!bch2_btree_node_relock(trans, path, level))
+	if (!bch2_btree_node_relock_notrace(trans, path, level))
 		return;
 
 	BUG_ON(!btree_path_pos_in_node(path, l->b));
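
This is the call site the commit message refers to:
bch2_btree_path_verify_level() only runs when iterator debugging is enabled,
so a relock failure here is expected noise rather than evidence of a
slowpath, and the notrace variant skips the trace event.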

fs/bcachefs/btree_locking.c

@@ -401,7 +401,8 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
-                              struct btree_path *path, unsigned level)
+                              struct btree_path *path, unsigned level,
+                              bool trace)
 {
 	struct btree *b = btree_path_node(path, level);
 	int want = __btree_lock_want(path, level);
@@ -416,7 +417,8 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 		return true;
 	}
 fail:
-	trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
+	if (trace)
+		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 	return false;
 }
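
Threading a bool through the slowpath keeps a single copy of the relock
logic; the only behavioral difference between the two wrappers is whether a
failure is counted and traced.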

fs/bcachefs/btree_locking.h

@@ -317,7 +317,7 @@ static inline int bch2_btree_path_relock(struct btree_trans *trans,
 		: __bch2_btree_path_relock(trans, path, trace_ip);
 }
 
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
 
 static inline bool bch2_btree_node_relock(struct btree_trans *trans,
                                           struct btree_path *path, unsigned level)
@@ -328,7 +328,19 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 	return likely(btree_node_locked(path, level)) ||
 		(!IS_ERR_OR_NULL(path->l[level].b) &&
-		 __bch2_btree_node_relock(trans, path, level));
+		 __bch2_btree_node_relock(trans, path, level, true));
 }
 
+static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
+                                                  struct btree_path *path, unsigned level)
+{
+	EBUG_ON(btree_node_locked(path, level) &&
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+	return likely(btree_node_locked(path, level)) ||
+		(!IS_ERR_OR_NULL(path->l[level].b) &&
+		 __bch2_btree_node_relock(trans, path, level, false));
+}
+
 /* upgrade */
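
For readers who want the pattern outside the tree, here is a minimal,
self-contained sketch of the same idea: one slowpath helper takes a `trace`
flag, and two thin wrappers decide whether a failure is reported. All names
below (try_relock, node_relock, node_relock_notrace) are hypothetical, not
the bcachefs API, and printf() stands in for trace_and_count().

/*
 * Hypothetical sketch of the notrace-wrapper pattern from this commit.
 * None of these names are the real bcachefs API.
 */
#include <stdbool.h>
#include <stdio.h>

/* stand-in for the real lock attempt; fails for any valid level here */
static bool try_relock(int level)
{
	return level < 0;
}

static bool node_relock_common(int level, bool trace)
{
	if (try_relock(level))
		return true;

	if (trace)
		printf("relock_fail: level %d\n", level);
	return false;
}

/* normal callers: a failure here is a real slowpath event, so report it */
static inline bool node_relock(int level)
{
	return node_relock_common(level, true);
}

/* debug/verify callers: failures are expected noise, so stay quiet */
static inline bool node_relock_notrace(int level)
{
	return node_relock_common(level, false);
}

int main(void)
{
	node_relock(1);		/* emits the failure event */
	node_relock_notrace(1);	/* silent, same locking behavior */
	return 0;
}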