mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
md/raid5: use wait_on_bit() for R5_Overlap
Convert uses of wait_for_overlap wait queue with R5_Overlap bit to
wait_on_bit() / wake_up_bit().

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Link: https://lore.kernel.org/r/20240827153536.6743-2-artur.paszkiewicz@intel.com
Signed-off-by: Song Liu <song@kernel.org>
parent 7f67fdae33
commit e6a03207b9
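
The diff below replaces the single shared conf->wait_for_overlap wait queue with per-bit sleeping on the R5_Overlap flag of each r5dev. A minimal sketch of the resulting pattern, outside the raid5 code (example_wait_overlap() and example_clear_overlap() are hypothetical helpers, and dev_flags stands in for sh->dev[i].flags):

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

/* Waiter side: mark interest in the slot, then sleep until the bit clears. */
static void example_wait_overlap(unsigned long *dev_flags, int bit_nr)
{
	set_bit(bit_nr, dev_flags);
	/* ... drop stripe references and locks before sleeping ... */
	wait_on_bit(dev_flags, bit_nr, TASK_UNINTERRUPTIBLE);
}

/* Waker side: replaces "do_wakeup = 1; ... wake_up(&conf->wait_for_overlap)". */
static void example_clear_overlap(unsigned long *dev_flags, int bit_nr)
{
	if (test_and_clear_bit(bit_nr, dev_flags))
		wake_up_bit(dev_flags, bit_nr);
}

Waiters hash onto a bit waitqueue keyed by the flag word and bit number, so only tasks waiting on that particular device flag are woken, instead of every waiter on the array-wide queue.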
@@ -2798,7 +2798,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
 {
 	struct r5l_log *log = READ_ONCE(conf->log);
 	int i;
-	int do_wakeup = 0;
 	sector_t tree_index;
 	void __rcu **pslot;
 	uintptr_t refcount;
@@ -2815,7 +2814,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
 	for (i = sh->disks; i--; ) {
 		clear_bit(R5_InJournal, &sh->dev[i].flags);
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-			do_wakeup = 1;
+			wake_up_bit(&sh->dev[i].flags, R5_Overlap);
 	}
 
 	/*
@@ -2828,9 +2827,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
 	if (atomic_dec_and_test(&conf->pending_full_writes))
 		md_wakeup_thread(conf->mddev->thread);
 
-	if (do_wakeup)
-		wake_up(&conf->wait_for_overlap);
-
 	spin_lock_irq(&log->stripe_in_journal_lock);
 	list_del_init(&sh->r5c);
 	spin_unlock_irq(&log->stripe_in_journal_lock);
@@ -2337,7 +2337,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
-				wake_up(&sh->raid_conf->wait_for_overlap);
+				wake_up_bit(&dev->flags, R5_Overlap);
 		}
 	}
 	local_unlock(&conf->percpu->lock);
@@ -3473,7 +3473,7 @@ static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
 		 * With PPL only writes to consecutive data chunks within a
 		 * stripe are allowed because for a single stripe_head we can
 		 * only have one PPL entry at a time, which describes one data
-		 * range. Not really an overlap, but wait_for_overlap can be
+		 * range. Not really an overlap, but R5_Overlap can be
 		 * used to handle this.
 		 */
 		sector_t sector;
@@ -3652,7 +3652,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		log_stripe_write_finished(sh);
 
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-			wake_up(&conf->wait_for_overlap);
+			wake_up_bit(&sh->dev[i].flags, R5_Overlap);
 
 		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
@@ -3697,7 +3697,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			sh->dev[i].toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-				wake_up(&conf->wait_for_overlap);
+				wake_up_bit(&sh->dev[i].flags, R5_Overlap);
 			if (bi)
 				s->to_read--;
 			while (bi && bi->bi_iter.bi_sector <
@@ -3736,7 +3736,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
 	BUG_ON(sh->batch_head);
 	clear_bit(STRIPE_SYNCING, &sh->state);
 	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
-		wake_up(&conf->wait_for_overlap);
+		wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
 	s->syncing = 0;
 	s->replacing = 0;
 	/* There is nothing more to do for sync/check/repair.
@@ -4877,7 +4877,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 {
 	struct stripe_head *sh, *next;
 	int i;
-	int do_wakeup = 0;
 
 	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
 
@@ -4913,7 +4912,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 		spin_unlock_irq(&sh->stripe_lock);
 		for (i = 0; i < sh->disks; i++) {
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-				do_wakeup = 1;
+				wake_up_bit(&sh->dev[i].flags, R5_Overlap);
 			sh->dev[i].flags = head_sh->dev[i].flags &
 				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
 		}
@@ -4927,12 +4926,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 	spin_unlock_irq(&head_sh->stripe_lock);
 	for (i = 0; i < head_sh->disks; i++)
 		if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
-			do_wakeup = 1;
+			wake_up_bit(&head_sh->dev[i].flags, R5_Overlap);
 	if (head_sh->state & handle_flags)
 		set_bit(STRIPE_HANDLE, &head_sh->state);
-
-	if (do_wakeup)
-		wake_up(&head_sh->raid_conf->wait_for_overlap);
 }
 
 static void handle_stripe(struct stripe_head *sh)
@@ -5198,7 +5194,7 @@ static void handle_stripe(struct stripe_head *sh)
 			md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
 		clear_bit(STRIPE_SYNCING, &sh->state);
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
-			wake_up(&conf->wait_for_overlap);
+			wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
 	}
 
 	/* If the failed drives are just a ReadError, then we might need
@@ -5755,12 +5751,11 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		int d;
 	again:
 		sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
-		prepare_to_wait(&conf->wait_for_overlap, &w,
-				TASK_UNINTERRUPTIBLE);
 		set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
 		if (test_bit(STRIPE_SYNCING, &sh->state)) {
 			raid5_release_stripe(sh);
-			schedule();
+			wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap,
+				    TASK_UNINTERRUPTIBLE);
 			goto again;
 		}
 		clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
@@ -5772,12 +5767,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 				set_bit(R5_Overlap, &sh->dev[d].flags);
 				spin_unlock_irq(&sh->stripe_lock);
 				raid5_release_stripe(sh);
-				schedule();
+				wait_on_bit(&sh->dev[d].flags, R5_Overlap,
+					    TASK_UNINTERRUPTIBLE);
 				goto again;
 			}
 		}
 		set_bit(STRIPE_DISCARD, &sh->state);
-		finish_wait(&conf->wait_for_overlap, &w);
 		sh->overwrite_disks = 0;
 		for (d = 0; d < conf->raid_disks; d++) {
 			if (d == sh->pd_idx || d == sh->qd_idx)
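
The make_discard_request() hunks above show the waiter side of the conversion: instead of parking on conf->wait_for_overlap with prepare_to_wait()/schedule()/finish_wait(), the caller sets R5_Overlap on the conflicting device, drops the stripe, sleeps in wait_on_bit() until the current owner clears the bit, and retries. Condensed into a hypothetical helper (conflict_detected() is a stand-in for the STRIPE_SYNCING / towrite / toread checks, not a real function):

/* Hedged sketch of the wait-and-retry shape; not the actual raid5 code. */
static void example_wait_and_retry(struct r5conf *conf, sector_t sector, int d)
{
	struct stripe_head *sh;
again:
	sh = raid5_get_active_stripe(conf, NULL, sector, 0);
	set_bit(R5_Overlap, &sh->dev[d].flags);
	if (conflict_detected(sh, d)) {
		raid5_release_stripe(sh);		/* drop the stripe before sleeping */
		wait_on_bit(&sh->dev[d].flags, R5_Overlap,
			    TASK_UNINTERRUPTIBLE);	/* woken by wake_up_bit() on this flag */
		goto again;
	}
	clear_bit(R5_Overlap, &sh->dev[d].flags);	/* no conflict: clear the marker */
	/* ... proceed with the discard on this stripe ... */
}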
@@ -5854,7 +5849,6 @@ static int add_all_stripe_bios(struct r5conf *conf,
 		struct bio *bi, int forwrite, int previous)
 {
 	int dd_idx;
-	int ret = 1;
 
 	spin_lock_irq(&sh->stripe_lock);
 
@@ -5870,14 +5864,19 @@ static int add_all_stripe_bios(struct r5conf *conf,
 
 		if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
 			set_bit(R5_Overlap, &dev->flags);
-			ret = 0;
-			continue;
+			spin_unlock_irq(&sh->stripe_lock);
+			raid5_release_stripe(sh);
+			/* release batch_last before wait to avoid risk of deadlock */
+			if (ctx->batch_last) {
+				raid5_release_stripe(ctx->batch_last);
+				ctx->batch_last = NULL;
+			}
+			md_wakeup_thread(conf->mddev->thread);
+			wait_on_bit(&dev->flags, R5_Overlap, TASK_UNINTERRUPTIBLE);
+			return 0;
 		}
 	}
 
-	if (!ret)
-		goto out;
-
 	for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
 		struct r5dev *dev = &sh->dev[dd_idx];
 
@@ -5893,9 +5892,8 @@ static int add_all_stripe_bios(struct r5conf *conf,
 			  RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
 	}
 
-out:
 	spin_unlock_irq(&sh->stripe_lock);
-	return ret;
+	return 1;
 }
 
 enum reshape_loc {
@@ -5991,17 +5989,17 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
 		goto out_release;
 	}
 
-	if (test_bit(STRIPE_EXPANDING, &sh->state) ||
-	    !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
-		/*
-		 * Stripe is busy expanding or add failed due to
-		 * overlap. Flush everything and wait a while.
-		 */
+	if (test_bit(STRIPE_EXPANDING, &sh->state)) {
 		md_wakeup_thread(mddev->thread);
 		ret = STRIPE_SCHEDULE_AND_RETRY;
 		goto out_release;
 	}
 
+	if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
+		ret = STRIPE_RETRY;
+		goto out;
+	}
+
 	if (stripe_can_batch(sh)) {
 		stripe_add_to_batch_list(conf, sh, ctx->batch_last);
 		if (ctx->batch_last)