mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-04 12:12:05 +00:00
readahead: drop dead code in ondemand_readahead()
ondemand_readahead() scales up the readahead window if the current read would hit the readahead mark placed by itself. However the condition is mostly dead code because:

a) In case of async readahead we always increase ra->start so ra->start == index is never true.

b) In case of sync readahead we either go through try_context_readahead() in which case ra->async_size == 1 < ra->size, or we go through initial_readahead where ra->async_size == ra->size iff ra->size == max_pages.

So the only practical effect is reducing async_size for large initial reads. Make the code more obvious.

Link: https://lkml.kernel.org/r/20240625101909.12234-7-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Tested-by: Zhang Peng <zhangpengpeng0808@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
8eaf93ac70
commit
0b1efc3e78
@@ -549,7 +549,6 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
 	struct file_ra_state *ra = ractl->ra;
 	unsigned long max_pages = ra->ra_pages;
-	unsigned long add_pages;
 	pgoff_t index = readahead_index(ractl);
 	pgoff_t expected, prev_index;
 	unsigned int order = folio ? folio_order(folio) : 0;
@@ -638,26 +637,10 @@ static void ondemand_readahead(struct readahead_control *ractl,
 initial_readahead:
 	ra->start = index;
 	ra->size = get_init_ra_size(req_size, max_pages);
-	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
+	ra->async_size = ra->size > req_size ? ra->size - req_size :
+			 ra->size >> 1;
 
 readit:
-	/*
-	 * Will this read hit the readahead marker made by itself?
-	 * If so, trigger the readahead marker hit now, and merge
-	 * the resulted next readahead window into the current one.
-	 * Take care of maximum IO pages as above.
-	 */
-	if (index == ra->start && ra->size == ra->async_size) {
-		add_pages = get_next_ra_size(ra, max_pages);
-		if (ra->size + add_pages <= max_pages) {
-			ra->async_size = add_pages;
-			ra->size += add_pages;
-		} else {
-			ra->size = max_pages;
-			ra->async_size = max_pages >> 1;
-		}
-	}
-
 	ractl->_index = ra->start;
 	page_cache_ra_order(ractl, ra, order);
 }
Loading…
Reference in New Issue
Block a user