mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 09:12:07 +00:00
readahead: drop pointless index from force_page_cache_ra()

The current readahead index is tracked in readahead_control and is properly
updated by page_cache_ra_unbounded() (by read_pages(), in fact).  So there is
no need to track the index separately in force_page_cache_ra().

Link: https://lkml.kernel.org/r/20240625101909.12234-4-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Tested-by: Zhang Peng <zhangpengpeng0808@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
7c877586da
commit
878343dfa4
@@ -313,7 +313,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct address_space *mapping = ractl->mapping;
 	struct file_ra_state *ra = ractl->ra;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-	unsigned long max_pages, index;
+	unsigned long max_pages;
 
 	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
@@ -322,7 +322,6 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	 * If the request exceeds the readahead window, allow the read to
 	 * be up to the optimal hardware IO size
 	 */
-	index = readahead_index(ractl);
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
 	while (nr_to_read) {
@@ -330,10 +329,8 @@ void force_page_cache_ra(struct readahead_control *ractl,
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		ractl->_index = index;
 		do_page_cache_ra(ractl, this_chunk, 0);
 
-		index += this_chunk;
 		nr_to_read -= this_chunk;
 	}
 }
|
Loading…
Reference in New Issue
Block a user