mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
bcachefs: Add block plugging to read paths
This will help with some of the btree_trans srcu lock hold time warnings that are still turning up; submit_bio() can block for awhile if the device is sufficiently congested. It's not a perfect solution since blk_plug bios are submitted when scheduling; we might want a way to disable the "submit on context switch" behaviour, or switch to our own plugging in the future. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
c4bfe7049c
commit
b6a562e6d8
@@ -248,6 +248,7 @@ void bch2_readahead(struct readahead_control *ractl)
|
||||
struct bch_io_opts opts;
|
||||
struct folio *folio;
|
||||
struct readpages_iter readpages_iter;
|
||||
struct blk_plug plug;
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
|
||||
@@ -255,6 +256,16 @@ void bch2_readahead(struct readahead_control *ractl)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Besides being a general performance optimization, plugging helps with
|
||||
* avoiding btree transaction srcu warnings - submitting a bio can
|
||||
* block, and we don't want to do that with the transaction locked.
|
||||
*
|
||||
* However, plugged bios are submitted when we schedule; we ideally
|
||||
* would have our own scheduler hook to call unlock_long() before
|
||||
* scheduling.
|
||||
*/
|
||||
blk_start_plug(&plug);
|
||||
bch2_pagecache_add_get(inode);
|
||||
|
||||
struct btree_trans *trans = bch2_trans_get(c);
|
||||
@@ -281,7 +292,7 @@ void bch2_readahead(struct readahead_control *ractl)
|
||||
bch2_trans_put(trans);
|
||||
|
||||
bch2_pagecache_add_put(inode);
|
||||
|
||||
blk_finish_plug(&plug);
|
||||
darray_exit(&readpages_iter.folios);
|
||||
}
|
||||
|
||||
@@ -296,9 +307,13 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch_read_bio *rbio;
|
||||
struct bch_io_opts opts;
|
||||
struct blk_plug plug;
|
||||
int ret;
|
||||
DECLARE_COMPLETION_ONSTACK(done);
|
||||
|
||||
BUG_ON(folio_test_uptodate(folio));
|
||||
BUG_ON(folio_test_dirty(folio));
|
||||
|
||||
if (!bch2_folio_create(folio, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -313,7 +328,9 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
|
||||
rbio->bio.bi_iter.bi_sector = folio_sector(folio);
|
||||
BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
|
||||
|
||||
blk_start_plug(&plug);
|
||||
bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
|
||||
blk_finish_plug(&plug);
|
||||
wait_for_completion(&done);
|
||||
|
||||
ret = blk_status_to_errno(rbio->bio.bi_status);
|
||||
|
@@ -70,6 +70,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
|
||||
struct bch_io_opts opts;
|
||||
struct dio_read *dio;
|
||||
struct bio *bio;
|
||||
struct blk_plug plug;
|
||||
loff_t offset = req->ki_pos;
|
||||
bool sync = is_sync_kiocb(req);
|
||||
size_t shorten;
|
||||
@@ -128,6 +129,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
|
||||
*/
|
||||
dio->should_dirty = iter_is_iovec(iter);
|
||||
|
||||
blk_start_plug(&plug);
|
||||
|
||||
goto start;
|
||||
while (iter->count) {
|
||||
bio = bio_alloc_bioset(NULL,
|
||||
@@ -160,6 +163,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
|
||||
bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
|
||||
}
|
||||
|
||||
blk_finish_plug(&plug);
|
||||
|
||||
iter->count += shorten;
|
||||
|
||||
if (sync) {
|
||||
|
Loading…
Reference in New Issue
Block a user