Merge tag 'md-next-20230817' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.6/block

Pull MD changes from Song:

"1. Fix perf regression for raid0 large sequential writes, by Jan Kara.
 2. Fix split bio iostat for raid0, by David Jeffery.
 3. Various raid1 fixes, by Heinz Mauelshagen and Xueshi Hu."

* tag 'md-next-20230817' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md: raid0: account for split bio in iostat accounting
  md/raid0: Fix performance regression for large sequential writes
  md/raid0: Factor out helper for mapping and submitting a bio
  md raid1: allow writebehind to work on any leg device set WriteMostly
  md/raid1: hold the barrier until handle_read_error() finishes
  md/raid1: free the r1bio before waiting for blocked rdev
  md/raid1: call free_r1bio() before allow_barrier() in raid_end_bio_io()
Merged by Jens Axboe on 2023-08-18 09:28:41 -06:00, commit eb051b2d31.
2 changed files with 52 additions and 50 deletions.

diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c

@@ -545,14 +545,50 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
 	bio_endio(bio);
 }
 
-static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
 {
 	struct r0conf *conf = mddev->private;
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
-	sector_t bio_sector;
+	sector_t bio_sector = bio->bi_iter.bi_sector;
+	sector_t sector = bio_sector;
+
+	md_account_bio(mddev, &bio);
+
+	zone = find_zone(mddev->private, &sector);
+	switch (conf->layout) {
+	case RAID0_ORIG_LAYOUT:
+		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
+		break;
+	case RAID0_ALT_MULTIZONE_LAYOUT:
+		tmp_dev = map_sector(mddev, zone, sector, &sector);
+		break;
+	default:
+		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
+		bio_io_error(bio);
+		return;
+	}
+
+	if (unlikely(is_rdev_broken(tmp_dev))) {
+		bio_io_error(bio);
+		md_error(mddev, tmp_dev);
+		return;
+	}
+
+	bio_set_dev(bio, tmp_dev->bdev);
+	bio->bi_iter.bi_sector = sector + zone->dev_start +
+		tmp_dev->data_offset;
+
+	if (mddev->gendisk)
+		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+				      bio_sector);
+	mddev_check_write_zeroes(mddev, bio);
+	submit_bio_noacct(bio);
+}
+
+static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+{
 	sector_t sector;
-	sector_t orig_sector;
 	unsigned chunk_sects;
 	unsigned sectors;
@@ -565,8 +601,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 		return true;
 	}
 
-	bio_sector = bio->bi_iter.bi_sector;
-	sector = bio_sector;
+	sector = bio->bi_iter.bi_sector;
 	chunk_sects = mddev->chunk_sectors;
 
 	sectors = chunk_sects -
@@ -574,50 +609,15 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 		 ? (sector & (chunk_sects-1))
 		 : sector_div(sector, chunk_sects));
 
-	/* Restore due to sector_div */
-	sector = bio_sector;
-
 	if (sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
 					      &mddev->bio_set);
 		bio_chain(split, bio);
-		submit_bio_noacct(bio);
+		raid0_map_submit_bio(mddev, bio);
 		bio = split;
 	}
 
-	if (bio->bi_pool != &mddev->bio_set)
-		md_account_bio(mddev, &bio);
-
-	orig_sector = sector;
-	zone = find_zone(mddev->private, &sector);
-	switch (conf->layout) {
-	case RAID0_ORIG_LAYOUT:
-		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
-		break;
-	case RAID0_ALT_MULTIZONE_LAYOUT:
-		tmp_dev = map_sector(mddev, zone, sector, &sector);
-		break;
-	default:
-		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
-		bio_io_error(bio);
-		return true;
-	}
-
-	if (unlikely(is_rdev_broken(tmp_dev))) {
-		bio_io_error(bio);
-		md_error(mddev, tmp_dev);
-		return true;
-	}
-
-	bio_set_dev(bio, tmp_dev->bdev);
-	bio->bi_iter.bi_sector = sector + zone->dev_start +
-		tmp_dev->data_offset;
-
-	if (mddev->gendisk)
-		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
-				      bio_sector);
-	mddev_check_write_zeroes(mddev, bio);
-	submit_bio_noacct(bio);
+	raid0_map_submit_bio(mddev, bio);
 	return true;
 }
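
Taken together (a condensed sketch of the new side above, with the flush/discard
early exits, the non-power-of-2 chunk arithmetic, and error paths elided, so not
the verbatim kernel code): the head of a split is now mapped and submitted straight
to its member device by raid0_map_submit_bio() instead of being re-queued through
submit_bio_noacct(), where it would sit on current->bio_list behind the tail and
defeat merging of large sequential writes; md_account_bio() also moved into the
helper, so both halves of a split show up in iostat:

	static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
	{
		sector_t sector = bio->bi_iter.bi_sector;
		unsigned chunk_sects = mddev->chunk_sectors;
		/* sectors left in the current chunk (power-of-2 case) */
		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

		if (sectors < bio_sectors(bio)) {
			struct bio *split = bio_split(bio, sectors, GFP_NOIO,
						      &mddev->bio_set);
			bio_chain(split, bio);
			raid0_map_submit_bio(mddev, bio);  /* head: map and submit now */
			bio = split;                       /* tail: handled below */
		}
		raid0_map_submit_bio(mddev, bio);  /* accounts via md_account_bio() */
		return true;
	}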

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c

@@ -311,6 +311,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
 	struct r1conf *conf = r1_bio->mddev->private;
+	sector_t sector = r1_bio->sector;
 
 	/* if nobody has done the final endio yet, do it now */
 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
@@ -321,13 +322,13 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 		call_bio_endio(r1_bio);
 	}
 
+	free_r1bio(r1_bio);
 	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle. All I/Os, even write-behind writes, are done.
	 */
-	allow_barrier(conf, r1_bio->sector);
-
-	free_r1bio(r1_bio);
+	allow_barrier(conf, sector);
 }
 
 /*
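
The reordering in raid_end_bio_io() is a use-after-free guard: once allow_barrier()
drops the barrier, previously blocked activity (a resync, or a reshape that swaps
out the r1bio mempool) may resume, so the barrier sector is snapshotted and the
r1bio returned to its pool before the barrier is lifted. In sketch form:

	sector_t sector = r1_bio->sector;  /* snapshot while r1_bio is still valid */
	/* ... final endio on the master bio happens here ... */
	free_r1bio(r1_bio);            /* back to the mempool first */
	allow_barrier(conf, sector);   /* only now unblock resync/reshape */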
@@ -1377,6 +1378,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		return;
 	}
 
+ retry_write:
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
@@ -1392,7 +1394,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
	 */
 	disks = conf->raid_disks * 2;
 
- retry_write:
 	blocked_rdev = NULL;
 	rcu_read_lock();
 	max_sectors = r1_bio->sectors;
@@ -1472,7 +1473,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		for (j = 0; j < i; j++)
 			if (r1_bio->bios[j])
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-		r1_bio->state = 0;
+		free_r1bio(r1_bio);
 		allow_barrier(conf, bio->bi_iter.bi_sector);
 
 		if (bio->bi_opf & REQ_NOWAIT) {
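
With the retry_write label hoisted above alloc_r1bio(), the blocked-rdev backout
can free the r1bio outright (the old code merely zeroed its state) and a fresh one
is allocated on the next pass, so no r1bio is held while waiting. A condensed view
of the resulting loop, assuming the existing md helpers on that path
(md_wait_for_blocked_rdev(), wait_barrier()):

 retry_write:
	r1_bio = alloc_r1bio(mddev, bio);  /* fresh r1bio on every pass */
	/* ... pick mirrors; a blocked rdev aborts this pass ... */
	if (unlikely(blocked_rdev)) {
		free_r1bio(r1_bio);        /* was: r1_bio->state = 0 */
		allow_barrier(conf, bio->bi_iter.bi_sector);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector, false);
		goto retry_write;
	}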
@@ -1522,8 +1523,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
-			if (bitmap &&
-			    test_bit(WriteMostly, &rdev->flags) &&
+			if (bitmap && write_behind &&
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
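
Heinz Mauelshagen's one-liner swaps the per-leg test for write_behind, a flag that
raid1_write_request() computes earlier by checking every mirror leg for WriteMostly,
so behind-writes can engage whenever any leg is write-mostly rather than only when
this particular leg is. Condensed, with the call the branch guards:

	/* write_behind is true if any rdev in the array has WriteMostly set */
	if (bitmap && write_behind &&
	    (atomic_read(&bitmap->behind_writes)
	     < mddev->bitmap_info.max_write_behind) &&
	    !waitqueue_active(&bitmap->behind_wait)) {
		alloc_behind_master_bio(r1_bio, bio);
	}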
@@ -2510,6 +2510,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	struct mddev *mddev = conf->mddev;
 	struct bio *bio;
 	struct md_rdev *rdev;
+	sector_t sector;
 
 	clear_bit(R1BIO_ReadError, &r1_bio->state);
 	/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2539,12 +2540,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	}
 
 	rdev_dec_pending(rdev, conf->mddev);
-	allow_barrier(conf, r1_bio->sector);
+	sector = r1_bio->sector;
 	bio = r1_bio->master_bio;
 
 	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
 	r1_bio->state = 0;
 	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
+	allow_barrier(conf, sector);
 }
 
 static void raid1d(struct md_thread *thread)
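
handle_read_error() now keeps the barrier raised across the whole retry: the sector
is stashed before the r1bio is handed back to raid1_read_request() (which reuses it),
and allow_barrier() runs only once the read has been requeued, so a waiter such as a
reshape cannot slip in mid-retry. In sketch form:

	sector = r1_bio->sector;       /* the requeued read reuses r1_bio */
	bio = r1_bio->master_bio;
	r1_bio->state = 0;             /* bios[] keep their IO_BLOCKED markers */
	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
	allow_barrier(conf, sector);   /* drop the barrier only after requeue */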