mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 02:36:02 +00:00
mm, swap: avoid lock swap_avail_lock when held cluster lock
Cluster lock is used to protect the swap_cluster_info and corresponding elements in swap_info_struct->swap_map[]. But it is found that now in scan_swap_map_slots(), swap_avail_lock may be acquired when cluster lock is held. This does no good except making the locking more complex and increasing the potential locking contention, because the swap_info_struct->lock is used to protect the data structure operated on in the code already. Fix this via moving the corresponding operations in scan_swap_map_slots() out of cluster lock. Link: http://lkml.kernel.org/r/20170317064635.12792-3-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Acked-by: Tim Chen <tim.c.chen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
0ef017d117
commit
2872bb2d0a
@ -672,6 +672,9 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
|
||||
else
|
||||
goto done;
|
||||
}
|
||||
si->swap_map[offset] = usage;
|
||||
inc_cluster_info_page(si, si->cluster_info, offset);
|
||||
unlock_cluster(ci);
|
||||
|
||||
if (offset == si->lowest_bit)
|
||||
si->lowest_bit++;
|
||||
@ -685,9 +688,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
|
||||
plist_del(&si->avail_list, &swap_avail_head);
|
||||
spin_unlock(&swap_avail_lock);
|
||||
}
|
||||
si->swap_map[offset] = usage;
|
||||
inc_cluster_info_page(si, si->cluster_info, offset);
|
||||
unlock_cluster(ci);
|
||||
si->cluster_next = offset + 1;
|
||||
slots[n_ret++] = swp_entry(si->type, offset);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user