Merge patch series "svnapot fixes"
Alexandre Ghiti <alexghiti@rivosinc.com> says:

While merging riscv napot and arm64 contpte support, I noticed we did
not abide by the specification, which states that we should clear a
napot mapping before setting a new one, called "break before make" on
arm64 (patch 1), and that we did not add the new hugetlb page size
introduced by napot to hugetlb_mask_last_page() (patch 2).

* b4-shazam-merge:
  riscv: Fix hugetlb_mask_last_page() when NAPOT is enabled
  riscv: Fix set_huge_pte_at() for NAPOT mapping

Link: https://lore.kernel.org/r/20240117195741.1926459-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 168b849728
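The second patch implements the ordering quoted from the privileged specification in the diff below: first invalidate every constituent PTE of the NAPOT mapping, then flush the affected TLB range, and only then write the new PTEs. Before the diff, here is a minimal user-space sketch of that ordering on a toy PTE array; the names (toy_pte, toy_flush_range, toy_set_contig) are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

/* Toy stand-in for a page-table entry: 0 means invalid. */
typedef unsigned long toy_pte;

/* Stand-in for SFENCE.VMA / flush_tlb_range(); a real kernel would
 * flush the TLB for [start, end) here. */
static void toy_flush_range(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
}

/*
 * Break-before-make, in miniature: for a contiguous (NAPOT-style)
 * mapping made of 'n' entries, first clear every entry, then flush,
 * and only then install the new values.
 */
static void toy_set_contig(toy_pte *ptep, unsigned long addr,
			   unsigned long pgsize, int n, toy_pte newval)
{
	unsigned long saddr = addr;
	int i;

	for (i = 0; i < n; i++)				/* 1. break: invalidate */
		ptep[i] = 0;
	toy_flush_range(saddr, saddr + n * pgsize);	/* 2. flush the range   */
	for (i = 0; i < n; i++)				/* 3. make: write PTEs   */
		ptep[i] = newval;
}

int main(void)
{
	toy_pte ptes[16] = { 0 };

	/* e.g. a 64 KiB "NAPOT" region built from 16 x 4 KiB entries */
	toy_set_contig(ptes, 0x100000, 0x1000, 16, 0xabc);
	return 0;
}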
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	return pte;
 }
 
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+	unsigned long hp_size = huge_page_size(h);
+
+	switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		return P4D_SIZE - PUD_SIZE;
+#endif
+	case PMD_SIZE:
+		return PUD_SIZE - PMD_SIZE;
+	case napot_cont_size(NAPOT_CONT64KB_ORDER):
+		return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+	default:
+		break;
+	}
+
+	return 0UL;
+}
+
 static pte_t get_clear_contig(struct mm_struct *mm,
 			      unsigned long addr,
 			      pte_t *ptep,
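For orientation, the mask returned above is OR-ed into the current address by the generic hugetlb code (e.g. on the unmap path) when no PTE is found, so the walk skips straight to the last hugepage slot of the enclosing table level. A minimal user-space sketch of that arithmetic, assuming the usual riscv Sv39 sizes (4 KiB base pages, 2 MiB PMDs, 1 GiB PUDs) and the 64 KiB NAPOT size:

#include <stdio.h>

/* Assumed riscv Sv39 sizes; illustrative only. */
#define NAPOT_64K	(64UL << 10)
#define PMD_SIZE	(2UL << 20)
#define PUD_SIZE	(1UL << 30)

/* Mirrors the switch added in the hunk above, minus the PUD case. */
static unsigned long mask_last_page(unsigned long hp_size)
{
	if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	if (hp_size == NAPOT_64K)
		return PMD_SIZE - NAPOT_64K;
	return 0;
}

int main(void)
{
	/* A walk stuck at 'addr' with no 64 KiB PTE jumps to the last
	 * 64 KiB slot of the current 2 MiB region via addr |= mask,
	 * and the caller's addr += sz then crosses into the next PMD. */
	unsigned long addr = 0x40200000UL;
	unsigned long mask = mask_last_page(NAPOT_64K);

	printf("mask = %#lx, next stop = %#lx\n", mask, addr | mask);
	return 0;
}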
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
 	return entry;
 }
 
+static void clear_flush(struct mm_struct *mm,
+			     unsigned long addr,
+			     pte_t *ptep,
+			     unsigned long pgsize,
+			     unsigned long ncontig)
+{
+	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+	unsigned long i, saddr = addr;
+
+	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+		ptep_get_and_clear(mm, addr, ptep);
+
+	flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
 void set_huge_pte_at(struct mm_struct *mm,
 		     unsigned long addr,
 		     pte_t *ptep,
 		     pte_t pte,
 		     unsigned long sz)
 {
-	unsigned long hugepage_shift;
+	unsigned long hugepage_shift, pgsize;
 	int i, pte_num;
 
 	if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
 		hugepage_shift = PAGE_SHIFT;
 
 	pte_num = sz >> hugepage_shift;
-	for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+	pgsize = 1 << hugepage_shift;
+
+	if (!pte_present(pte)) {
+		for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+			set_ptes(mm, addr, ptep, pte, 1);
+		return;
+	}
+
+	if (!pte_napot(pte)) {
+		set_ptes(mm, addr, ptep, pte, 1);
+		return;
+	}
+
+	clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+	for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
 		set_pte_at(mm, addr, ptep, pte);
 }
 
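As a worked example of the dispatch above, assuming 4 KiB base pages: for a 64 KiB NAPOT page, sz falls below PMD_SIZE, so the elided branches leave hugepage_shift at PAGE_SHIFT (12), giving pte_num = 16 and pgsize = 4096, i.e. clear_flush() invalidates and flushes 16 base PTEs before the final loop rewrites them. A quick sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumed riscv values: 4 KiB base pages, 64 KiB NAPOT hugepage. */
	unsigned long sz = 64UL << 10;
	unsigned long hugepage_shift = 12;	/* PAGE_SHIFT for NAPOT sizes */

	unsigned long pte_num = sz >> hugepage_shift;	/* 16 entries  */
	unsigned long pgsize  = 1UL << hugepage_shift;	/* 4096 bytes  */

	printf("pte_num = %lu, pgsize = %lu\n", pte_num, pgsize);
	return 0;
}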