Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm: shmem: simplify the suitable huge orders validation for tmpfs
Patch series "Some cleanups for shmem", v3.

This series does some cleanups to reuse code, rename functions and simplify logic to make the code clearer. No functional changes are expected.

This patch (of 3):

Move the suitable huge orders validation into shmem_suitable_orders() for tmpfs, which can reuse some code to simplify the logic.

In addition, we don't have special handling for the error code -E2BIG when checking for conflicts with PMD-sized THP in the pagecache for tmpfs; instead, it will just fall back to order-0 allocations as this patch does, so this simplification will not add functional changes.

Link: https://lkml.kernel.org/r/cover.1721626645.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/965985dd6d322929d78a0beee0dafa1c2a1b81e2.1721626645.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 590b9d576c
commit 0bedf001e3
mm/shmem.c | 39 lines changed
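The commit message's point that dropping the -E2BIG special case is harmless rests on how the caller already treats allocation failures. Below is a simplified, from-memory sketch of the fallback path in shmem_get_folio_gfp() around this kernel version, not the verbatim upstream code; huge_gfp, fault_mm and the labels are assumed surrounding context:

	/* Sketch: large-order attempt first, then unconditional order-0 fallback. */
	if (orders > 0) {
		folio = shmem_alloc_and_add_folio(vmf, huge_gfp, inode, index,
						  fault_mm, orders);
		if (!IS_ERR(folio))
			goto alloced;
		/* A racing insertion is retried... */
		if (PTR_ERR(folio) == -EEXIST)
			goto repeat;
		/* ...while any other error (-E2BIG, -ENOMEM, ...) just falls through. */
	}
	/* Order-0 attempt: the fallback the commit message refers to. */
	folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);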
@@ -1680,20 +1680,30 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
 					   struct address_space *mapping, pgoff_t index,
 					   unsigned long orders)
 {
-	struct vm_area_struct *vma = vmf->vma;
+	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
 	pgoff_t aligned_index;
 	unsigned long pages;
 	int order;
 
-	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
-	if (!orders)
-		return 0;
+	if (vma) {
+		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+		if (!orders)
+			return 0;
+	}
 
 	/* Find the highest order that can add into the page cache */
 	order = highest_order(orders);
 	while (orders) {
 		pages = 1UL << order;
 		aligned_index = round_down(index, pages);
+		/*
+		 * Check for conflict before waiting on a huge allocation.
+		 * Conflict might be that a huge page has just been allocated
+		 * and added to page cache by a racing thread, or that there
+		 * is already at least one small page in the huge extent.
+		 * Be careful to retry when appropriate, but not forever!
+		 * Elsewhere -EEXIST would be the right code, but not here.
+		 */
 		if (!xa_find(&mapping->i_pages, &aligned_index,
 			     aligned_index + pages - 1, XA_PRESENT))
 			break;
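Assembled from the context and added lines of the hunk above, shmem_suitable_orders() reads roughly as follows after the patch; the rest of the loop and the return are outside the hunk and elided:

static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
					   struct address_space *mapping, pgoff_t index,
					   unsigned long orders)
{
	/* tmpfs callers pass vmf == NULL; anonymous shmem faults pass a vma. */
	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
	pgoff_t aligned_index;
	unsigned long pages;
	int order;

	if (vma) {
		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
		if (!orders)
			return 0;
	}

	/* Find the highest order that can add into the page cache */
	order = highest_order(orders);
	while (orders) {
		pages = 1UL << order;
		aligned_index = round_down(index, pages);
		/*
		 * Check for conflict before waiting on a huge allocation
		 * (full comment in the hunk above). A conflict here just
		 * lowers the order tried, instead of the old -E2BIG return
		 * on the tmpfs path.
		 */
		if (!xa_find(&mapping->i_pages, &aligned_index,
			     aligned_index + pages - 1, XA_PRESENT))
			break;
		/* ... lower the order and retry (outside this hunk) ... */
	}
	/* ... */
}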
@@ -1731,7 +1741,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
 	unsigned long suitable_orders = 0;
 	struct folio *folio = NULL;
 	long pages;
@@ -1741,26 +1750,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 		orders = 0;
 
 	if (orders > 0) {
-		if (vma && vma_is_anon_shmem(vma)) {
-			suitable_orders = shmem_suitable_orders(inode, vmf,
+		suitable_orders = shmem_suitable_orders(inode, vmf,
 							mapping, index, orders);
-		} else if (orders & BIT(HPAGE_PMD_ORDER)) {
-			pages = HPAGE_PMD_NR;
-			suitable_orders = BIT(HPAGE_PMD_ORDER);
-			index = round_down(index, HPAGE_PMD_NR);
-
-			/*
-			 * Check for conflict before waiting on a huge allocation.
-			 * Conflict might be that a huge page has just been allocated
-			 * and added to page cache by a racing thread, or that there
-			 * is already at least one small page in the huge extent.
-			 * Be careful to retry when appropriate, but not forever!
-			 * Elsewhere -EEXIST would be the right code, but not here.
-			 */
-			if (xa_find(&mapping->i_pages, &index,
-				    index + HPAGE_PMD_NR - 1, XA_PRESENT))
-				return ERR_PTR(-E2BIG);
-		}
 
 		order = highest_order(suitable_orders);
 		while (suitable_orders) {
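Read together, the call site in shmem_alloc_and_add_folio() collapses to a single path after the patch. A sketch assembled from the second hunk, with the surrounding allocation loop elided:

	if (orders > 0) {
		/*
		 * Both anonymous shmem (vmf != NULL) and tmpfs (vmf == NULL)
		 * now go through shmem_suitable_orders(); the PMD-only branch
		 * and its ERR_PTR(-E2BIG) return are gone.
		 */
		suitable_orders = shmem_suitable_orders(inode, vmf,
							mapping, index, orders);

		order = highest_order(suitable_orders);
		while (suitable_orders) {
			/* ... per-order allocation attempts (outside this hunk) ... */
		}
	}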