Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
btrfs: simplify obtaining a delayed ref head
Instead of doing it in two steps outside of delayed-ref.c, leaking low
level details such as locking, move the logic entirely to delayed-ref.c
under btrfs_select_ref_head(), reducing code and making things simpler
for the caller.

Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent 7ef3604886
commit a98048e10d
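The change is easiest to see from the caller's side. Below is a minimal
sketch of the resulting contract, based on the post-commit behavior shown
in the diff that follows; process_ready_heads() is a hypothetical caller
invented for illustration, not kernel code. After this commit,
btrfs_select_ref_head() takes delayed_refs->lock itself and returns NULL
when no ref head is ready, ERR_PTR(-EAGAIN) when the selected head was
freed before its mutex could be taken, or a head with head->mutex held.

/*
 * Hypothetical caller, for illustration only; mirrors the pattern
 * __btrfs_run_delayed_refs() uses after this commit.
 */
static int process_ready_heads(struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

        for (;;) {
                /* Locking is now internal to btrfs_select_ref_head(). */
                head = btrfs_select_ref_head(delayed_refs);
                if (!head)
                        return 0;       /* nothing ready to run */
                if (IS_ERR(head)) {
                        if (PTR_ERR(head) == -EAGAIN)
                                continue;       /* head freed under us, pick again */
                        return PTR_ERR(head);
                }

                /* head->mutex is held here: process the head's refs... */

                btrfs_delayed_ref_unlock(head);
        }
}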
fs/btrfs/delayed-ref.c

@@ -431,8 +431,8 @@ static struct btrfs_delayed_ref_head *find_ref_head(
 	return NULL;
 }
 
-bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
-			    struct btrfs_delayed_ref_head *head)
+static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
+				   struct btrfs_delayed_ref_head *head)
 {
 	lockdep_assert_held(&delayed_refs->lock);
 	if (mutex_trylock(&head->mutex))
@@ -561,8 +561,9 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 		struct btrfs_delayed_ref_root *delayed_refs)
 {
 	struct btrfs_delayed_ref_head *head;
+	bool locked;
 
-	lockdep_assert_held(&delayed_refs->lock);
+	spin_lock(&delayed_refs->lock);
 again:
 	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
 			     true);
@@ -570,16 +571,20 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 		delayed_refs->run_delayed_start = 0;
 		head = find_first_ref_head(delayed_refs);
 	}
-	if (!head)
+	if (!head) {
+		spin_unlock(&delayed_refs->lock);
 		return NULL;
+	}
 
 	while (head->processing) {
 		struct rb_node *node;
 
 		node = rb_next(&head->href_node);
 		if (!node) {
-			if (delayed_refs->run_delayed_start == 0)
+			if (delayed_refs->run_delayed_start == 0) {
+				spin_unlock(&delayed_refs->lock);
 				return NULL;
+			}
 			delayed_refs->run_delayed_start = 0;
 			goto again;
 		}
@@ -592,6 +597,18 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 	delayed_refs->num_heads_ready--;
 	delayed_refs->run_delayed_start = head->bytenr +
 		head->num_bytes;
+
+	locked = btrfs_delayed_ref_lock(delayed_refs, head);
+	spin_unlock(&delayed_refs->lock);
+
+	/*
+	 * We may have dropped the spin lock to get the head mutex lock, and
+	 * that might have given someone else time to free the head. If that's
+	 * true, it has been removed from our list and we can move on.
+	 */
+	if (!locked)
+		return ERR_PTR(-EAGAIN);
+
 	return head;
 }
 
fs/btrfs/delayed-ref.h

@@ -369,8 +369,6 @@ void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 bytenr);
-bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
-			    struct btrfs_delayed_ref_head *head);
 static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
 	mutex_unlock(&head->mutex);
fs/btrfs/extent-tree.c

@@ -1953,39 +1953,6 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
-					struct btrfs_trans_handle *trans)
-{
-	struct btrfs_delayed_ref_root *delayed_refs =
-		&trans->transaction->delayed_refs;
-	struct btrfs_delayed_ref_head *head = NULL;
-	bool locked;
-
-	spin_lock(&delayed_refs->lock);
-	head = btrfs_select_ref_head(delayed_refs);
-	if (!head) {
-		spin_unlock(&delayed_refs->lock);
-		return head;
-	}
-
-	/*
-	 * Grab the lock that says we are going to process all the refs for
-	 * this head
-	 */
-	locked = btrfs_delayed_ref_lock(delayed_refs, head);
-	spin_unlock(&delayed_refs->lock);
-
-	/*
-	 * We may have dropped the spin lock to get the head mutex lock, and
-	 * that might have given someone else time to free the head. If that's
-	 * true, it has been removed from our list and we can move on.
-	 */
-	if (!locked)
-		head = ERR_PTR(-EAGAIN);
-
-	return head;
-}
-
 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
 					   struct btrfs_delayed_ref_head *locked_ref,
 					   u64 *bytes_released)
@@ -2092,7 +2059,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 	do {
 		if (!locked_ref) {
-			locked_ref = btrfs_obtain_ref_head(trans);
+			locked_ref = btrfs_select_ref_head(delayed_refs);
 			if (IS_ERR_OR_NULL(locked_ref)) {
 				if (PTR_ERR(locked_ref) == -EAGAIN) {
 					continue;
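Taken together: btrfs_obtain_ref_head() disappears from extent-tree.c,
__btrfs_run_delayed_refs() calls btrfs_select_ref_head() directly with
the -EAGAIN retry kept at the call site, and btrfs_delayed_ref_lock()
becomes static to delayed-ref.c, so its declaration drops out of
delayed-ref.h.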