btrfs: handle allocation failure in btrfs_wq_submit_bio gracefully

btrfs_wq_submit_bio is used for writeback under memory pressure.
Instead of failing the I/O when we can't allocate the async_submit_bio,
just punt back to the synchronous submission path.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
Christoph Hellwig 2022-06-17 12:04:12 +02:00 committed by David Sterba
parent 82443fd55c
commit ea1f0cedef
3 changed files with 36 additions and 30 deletions
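
The pattern this commit adopts: the async helper reports plain success or failure, and on failure the caller falls through to the synchronous path instead of completing the bio with an error. Below is a minimal userspace C sketch of that calling convention; submit_async(), submit_sync(), and struct bio_stub are hypothetical stand-ins for btrfs_wq_submit_bio(), btrfs_submit_bio(), and the bio being submitted, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct bio_stub { int id; };

/* Stand-in for btrfs_submit_bio(): the synchronous submission path. */
static void submit_sync(struct bio_stub *bio)
{
	printf("bio %d submitted synchronously\n", bio->id);
}

/*
 * Stand-in for btrfs_wq_submit_bio(): true means the async context was
 * allocated and queued, false means the caller must punt. No error
 * status needs to be propagated.
 */
static bool submit_async(struct bio_stub *bio)
{
	void *async = malloc(64);	/* models kmalloc(GFP_NOFS) */

	if (!async)
		return false;
	printf("bio %d queued for async submission\n", bio->id);
	free(async);			/* the queued work item would free this */
	return true;
}

int main(void)
{
	struct bio_stub bio = { .id = 1 };

	/*
	 * Try async first; on allocation failure fall back to the sync
	 * path rather than failing the I/O with BLK_STS_RESOURCE.
	 */
	if (!submit_async(&bio))
		submit_sync(&bio);
	return 0;
}

Returning bool rather than blk_status_t keeps the allocation failure private to the submission attempt, so no caller can accidentally propagate BLK_STS_RESOURCE to bio_endio().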

fs/btrfs/disk-io.c

@@ -759,16 +759,23 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-				 int mirror_num, u64 dio_file_offset,
-				 extent_submit_bio_start_t *submit_bio_start)
+/*
+ * Submit bio to an async queue.
+ *
+ * Return:
+ * - true if the work has been successfully submitted
+ * - false in case of error
+ */
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+			 u64 dio_file_offset,
+			 extent_submit_bio_start_t *submit_bio_start)
 {
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	struct async_submit_bio *async;
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
-		return BLK_STS_RESOURCE;
+		return false;
 
 	async->inode = inode;
 	async->bio = bio;
@@ -786,7 +793,7 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
 		btrfs_queue_work(fs_info->hipri_workers, &async->work);
 	else
 		btrfs_queue_work(fs_info->workers, &async->work);
-	return 0;
+	return true;
 }
 
 static blk_status_t btree_csum_one_bio(struct bio *bio)
@@ -840,25 +847,23 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num)
 		btrfs_submit_bio(fs_info, bio, mirror_num);
 		return;
 	}
 
-	if (!should_async_write(fs_info, BTRFS_I(inode))) {
-		ret = btree_csum_one_bio(bio);
-		if (!ret) {
-			btrfs_submit_bio(fs_info, bio, mirror_num);
-			return;
-		}
-	} else {
-		/*
-		 * kthread helpers are used to submit writes so that
-		 * checksumming can happen in parallel across all CPUs
-		 */
-		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-					  btree_submit_bio_start);
-	}
+	/*
+	 * Kthread helpers are used to submit writes so that checksumming can
+	 * happen in parallel across all CPUs.
+	 */
+	if (should_async_write(fs_info, BTRFS_I(inode)) &&
+	    btrfs_wq_submit_bio(inode, bio, mirror_num, 0, btree_submit_bio_start))
+		return;
+
+	ret = btree_csum_one_bio(bio);
 	if (ret) {
 		bio->bi_status = ret;
 		bio_endio(bio);
+		return;
 	}
+
+	btrfs_submit_bio(fs_info, bio, mirror_num);
 }
 
 #ifdef CONFIG_MIGRATION

fs/btrfs/disk-io.h

@@ -114,9 +114,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 			  int atomic);
 int btrfs_read_extent_buffer(struct extent_buffer *buf, u64 parent_transid,
 			     int level, struct btrfs_key *first_key);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-				 int mirror_num, u64 dio_file_offset,
-				 extent_submit_bio_start_t *submit_bio_start);
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+			 u64 dio_file_offset,
+			 extent_submit_bio_start_t *submit_bio_start);
 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
 				   int mirror_num);
 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,

fs/btrfs/inode.c

@@ -2674,11 +2674,10 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num)
 	if (!(bi->flags & BTRFS_INODE_NODATASUM) &&
 	    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
 	    !btrfs_is_data_reloc_root(bi->root)) {
-		if (!atomic_read(&bi->sync_writers)) {
-			ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-					btrfs_submit_bio_start);
-			goto out;
-		}
+		if (!atomic_read(&bi->sync_writers) &&
+		    btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
+					btrfs_submit_bio_start))
+			return;
 
 		ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
 		if (ret)
@@ -8027,9 +8026,11 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
 		/* Check btrfs_submit_data_write_bio() for async submit rules */
-		if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers))
-			return btrfs_wq_submit_bio(inode, bio, 0, file_offset,
-					btrfs_submit_bio_start_direct_io);
+		if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers) &&
+		    btrfs_wq_submit_bio(inode, bio, 0, file_offset,
+					btrfs_submit_bio_start_direct_io))
+			return BLK_STS_OK;
+
 		/*
 		 * If we aren't doing async submit, calculate the csum of the
 		 * bio now.
 		 */