linux/arch/mips/include/asm/hugetlb.h
Oscar Salvador bd40b053fa mm: consolidate common checks in hugetlb_get_unmapped_area
prepare_hugepage_range() performs almost the same checks for all
architectures that define it, with the exception of mips and loongarch,
which also check for overflows.

The rest only check that addr and len are properly aligned, so we can
move those checks to hugetlb_get_unmapped_area() and get rid of a fair
amount of duplicated code.

[akpm@linux-foundation.org: remove now-unused local]
 Link: https://lore.kernel.org/oe-kbuild-all/202410081210.uNLbf3Jk-lkp@intel.com/
Link: https://lkml.kernel.org/r/20241007075037.267650-10-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2024-11-06 20:11:10 -08:00
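
The patch described above moves the common alignment checks out of each
architecture's prepare_hugepage_range() and into the generic
hugetlb_get_unmapped_area() path. As a rough illustration only (the helper
name hugetlb_common_checks() and the surrounding flow are assumptions made
for this sketch, not the upstream diff), the consolidated checks amount to
something like:

#include <linux/hugetlb.h>
#include <linux/mman.h>

/* Hypothetical sketch of the checks hoisted into the generic path. */
static int hugetlb_common_checks(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	/* len must be a multiple of the huge page size ... */
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	/* ... and must fit within the task's address space at all. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* A MAP_FIXED request must itself be huge-page aligned. */
	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
		return -EINVAL;

	/*
	 * mips (and loongarch) keep prepare_hugepage_range() only for the
	 * extra addr + len overflow check shown in the header below.
	 */
	return prepare_hugepage_range(file, addr, len);
}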

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr,
					 unsigned long len)
{
	unsigned long task_size = STACK_TOP;

	if (len > task_size)
		return -ENOMEM;
	if (task_size - len < addr)
		return -EINVAL;
	return 0;
}

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t clear;
	pte_t pte = *ptep;

	pte_val(clear) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, clear);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	/*
	 * clear the huge pte entry firstly, so that the other smp threads will
	 * not get old pte entry after finishing flush_tlb_page and before
	 * setting new huge pte entry
	 */
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
	unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;

	return !val || (val == (unsigned long)invalid_pte_table);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte,
					     int dirty)
{
	int changed = !pte_same(*ptep, pte);

	if (changed) {
		set_pte_at(vma->vm_mm, addr, ptep, pte);
		/*
		 * There could be some standard sized pages in there,
		 * get them all.
		 */
		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
	}

	return changed;
}

#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */