Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
page cache: Convert hole search to XArray
The page cache offers the ability to search for a miss in the previous or next N locations. Rather than teach the XArray about the page cache's definition of a miss, use xas_prev() and xas_next() to search the page array. This should be more efficient as it does not have to start the lookup from the top for each index.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent eb797a8ee0
commit 0d3f929666
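To illustrate the efficiency claim in the commit message, here is a minimal sketch (editorial, not part of this commit; both helper names are invented) contrasting a lookup that restarts from the root of the tree for each index with an XArray cursor walk:

	/* Hypothetical helpers, for illustration only. */
	static pgoff_t first_gap_restarting(struct address_space *mapping,
					    pgoff_t index)
	{
		/* Every xa_load() descends from the root of the tree again. */
		while (xa_load(&mapping->i_pages, index))
			index++;
		return index;
	}

	static pgoff_t first_gap_cursor(struct address_space *mapping,
					pgoff_t index)
	{
		XA_STATE(xas, &mapping->i_pages, index);

		/*
		 * The xa_state remembers the node it last visited, so
		 * stepping to the next index is normally a single step
		 * rather than a fresh descent from the top.
		 */
		while (xas_next(&xas))
			;
		return xas.xa_index;
	}

Both sketches assume the caller holds rcu_read_lock(), and unlike the real functions in this patch they ignore shadow (value) entries, which the committed code filters with xa_is_value().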
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -896,7 +896,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
 	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (end != inode->i_mapping->nrpages) {
 		rcu_read_lock();
-		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
+		end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
 		rcu_read_unlock();
 	}
 
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
 
 typedef int filler_t(void *, struct page *);
 
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_miss(struct address_space *mapping,
 			     pgoff_t index, unsigned long max_scan);
-pgoff_t page_cache_prev_hole(struct address_space *mapping,
+pgoff_t page_cache_prev_miss(struct address_space *mapping,
 			     pgoff_t index, unsigned long max_scan);
 
 #define FGP_ACCESSED		0x00000001
mm/filemap.c | 98 lines changed

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1326,86 +1326,76 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 }
 
 /**
- * page_cache_next_hole - find the next hole (not-present entry)
- * @mapping: mapping
- * @index: index
- * @max_scan: maximum range to search
+ * page_cache_next_miss() - Find the next gap in the page cache.
+ * @mapping: Mapping.
+ * @index: Index.
+ * @max_scan: Maximum range to search.
  *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
- * lowest indexed hole.
+ * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
+ * gap with the lowest index.
  *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >=
- * max_scan' will be true). In rare cases of index wrap-around, 0 will
- * be returned.
+ * This function may be called under the rcu_read_lock. However, this will
+ * not atomically search a snapshot of the cache at a single point in time.
+ * For example, if a gap is created at index 5, then subsequently a gap is
+ * created at index 10, page_cache_next_miss covering both indices may
+ * return 10 if called under the rcu_read_lock.
  *
- * page_cache_next_hole may be called under rcu_read_lock. However,
- * like radix_tree_gang_lookup, this will not atomically search a
- * snapshot of the tree at a single point in time. For example, if a
- * hole is created at index 5, then subsequently a hole is created at
- * index 10, page_cache_next_hole covering both indexes may return 10
- * if called under rcu_read_lock.
+ * Return: The index of the gap if found, otherwise an index outside the
+ * range specified (in which case 'return - index >= max_scan' will be true).
+ * In the rare case of index wrap-around, 0 will be returned.
  */
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_miss(struct address_space *mapping,
 			     pgoff_t index, unsigned long max_scan)
 {
-	unsigned long i;
+	XA_STATE(xas, &mapping->i_pages, index);
 
-	for (i = 0; i < max_scan; i++) {
-		struct page *page;
-
-		page = radix_tree_lookup(&mapping->i_pages, index);
-		if (!page || xa_is_value(page))
+	while (max_scan--) {
+		void *entry = xas_next(&xas);
+		if (!entry || xa_is_value(entry))
 			break;
-		index++;
-		if (index == 0)
+		if (xas.xa_index == 0)
 			break;
 	}
 
-	return index;
+	return xas.xa_index;
 }
-EXPORT_SYMBOL(page_cache_next_hole);
+EXPORT_SYMBOL(page_cache_next_miss);
 
 /**
- * page_cache_prev_hole - find the prev hole (not-present entry)
- * @mapping: mapping
- * @index: index
- * @max_scan: maximum range to search
+ * page_cache_prev_miss() - Find the previous gap in the page cache.
+ * @mapping: Mapping.
+ * @index: Index.
+ * @max_scan: Maximum range to search.
  *
- * Search backwards in the range [max(index-max_scan+1, 0), index] for
- * the first hole.
+ * Search the range [max(index - max_scan + 1, 0), index] for the
+ * gap with the highest index.
  *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >=
- * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
- * will be returned.
+ * This function may be called under the rcu_read_lock. However, this will
+ * not atomically search a snapshot of the cache at a single point in time.
+ * For example, if a gap is created at index 10, then subsequently a gap is
+ * created at index 5, page_cache_prev_miss() covering both indices may
+ * return 5 if called under the rcu_read_lock.
  *
- * page_cache_prev_hole may be called under rcu_read_lock. However,
- * like radix_tree_gang_lookup, this will not atomically search a
- * snapshot of the tree at a single point in time. For example, if a
- * hole is created at index 10, then subsequently a hole is created at
- * index 5, page_cache_prev_hole covering both indexes may return 5 if
- * called under rcu_read_lock.
+ * Return: The index of the gap if found, otherwise an index outside the
+ * range specified (in which case 'index - return >= max_scan' will be true).
+ * In the rare case of wrap-around, ULONG_MAX will be returned.
  */
-pgoff_t page_cache_prev_hole(struct address_space *mapping,
+pgoff_t page_cache_prev_miss(struct address_space *mapping,
 			     pgoff_t index, unsigned long max_scan)
 {
-	unsigned long i;
+	XA_STATE(xas, &mapping->i_pages, index);
 
-	for (i = 0; i < max_scan; i++) {
-		struct page *page;
-
-		page = radix_tree_lookup(&mapping->i_pages, index);
-		if (!page || xa_is_value(page))
+	while (max_scan--) {
+		void *entry = xas_prev(&xas);
+		if (!entry || xa_is_value(entry))
 			break;
-		index--;
-		if (index == ULONG_MAX)
+		if (xas.xa_index == ULONG_MAX)
 			break;
 	}
 
-	return index;
+	return xas.xa_index;
 }
-EXPORT_SYMBOL(page_cache_prev_hole);
+EXPORT_SYMBOL(page_cache_prev_miss);
 
 /**
  * find_get_entry - find and get a page cache entry
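A usage note on the return convention documented above: a caller can turn the returned gap index into a count of contiguous cached pages, which is essentially what the readahead hunks below do. A hedged sketch (cached_run_len is an invented name, not a kernel function):

	/* Sketch: how many pages starting at 'index' are already cached? */
	static unsigned long cached_run_len(struct address_space *mapping,
					    pgoff_t index, unsigned long max)
	{
		pgoff_t gap;

		rcu_read_lock();
		gap = page_cache_next_miss(mapping, index, max);
		rcu_read_unlock();

		/* Per the kernel-doc, a result with gap - index >= max
		 * means no gap was found within the range scanned. */
		if (gap - index >= max)
			return max;
		return gap - index;
	}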
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -336,7 +336,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
 	pgoff_t head;
 
 	rcu_read_lock();
-	head = page_cache_prev_hole(mapping, offset - 1, max);
+	head = page_cache_prev_miss(mapping, offset - 1, max);
 	rcu_read_unlock();
 
 	return offset - 1 - head;
@@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping,
 	pgoff_t start;
 
 	rcu_read_lock();
-	start = page_cache_next_hole(mapping, offset + 1, max_pages);
+	start = page_cache_next_miss(mapping, offset + 1, max_pages);
 	rcu_read_unlock();
 
 	if (!start || start - offset > max_pages)