mm: Introduce flush_cache_vmap_early()
The pcpu setup when using the page allocator sets up a new vmalloc mapping very early in the boot process, so early that it cannot use the flush_cache_vmap() function, which may depend on structures not yet initialized (for example, on riscv we currently send an IPI to flush the other CPUs' TLBs).

But on some architectures, we must call flush_cache_vmap(): for example, on riscv, some uarchs can cache invalid TLB entries, so we need to flush the newly established mapping to avoid taking an exception.

So fix this by introducing a new function, flush_cache_vmap_early(), which is called right after setting the new page table entry and before accessing this new mapping. This new function implements a local TLB flush on riscv and is a no-op for the other architectures (same as today).

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
parent 33cc938e65
commit 7a92fc8b4d
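In outline, the ordering this patch establishes in pcpu_page_first_chunk() looks like the sketch below. This is a paraphrase, not the literal kernel code: the per-unit loop is elided and map_percpu_unit_pages() is a hypothetical helper standing in for the real page-table setup; only the flush_cache_vmap_early() and memcpy() lines are taken verbatim from the mm/percpu.c hunk at the end of this diff.

	/* 1. Install the page table entries for this cpu's percpu unit
	 *    (map_percpu_unit_pages() is a hypothetical stand-in). */
	map_percpu_unit_pages(unit_addr, unit_pages);

	/* 2. Make the new mapping visible to the local cpu before it is
	 *    touched: a local TLB flush on riscv, a no-op elsewhere.
	 *    flush_cache_vmap() cannot be used this early because it may
	 *    IPI other cpus and relies on structures not yet set up. */
	flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);

	/* 3. First access through the new mapping: copy the static data. */
	memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);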
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -40,6 +40,7 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 /* TBD: optimize this */
 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
 #define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -340,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 		dsb(ishst);
 }
 
+#define flush_cache_vmap_early(start, end)	do { } while (0)
+
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 	if (!cache_is_vipt_nonaliasing())
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -43,6 +43,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
  */
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 #define flush_cache_vmap(start, end)		cache_wbinv_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		cache_wbinv_all()
 
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -41,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
 void flush_icache_deferred(struct mm_struct *mm);
 
 #define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -191,6 +191,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
 #define flush_cache_all() __flush_cache_all()
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
 static inline void flush_cache_mm(struct mm_struct *mm)
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -97,6 +97,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 		__flush_cache_vmap();
 }
 
+#define flush_cache_vmap_early(start, end)	do { } while (0)
+
 extern void (*__flush_cache_vunmap)(void);
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -38,6 +38,7 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 #define flush_icache_pages flush_icache_pages
 
 #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)
 
 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -41,6 +41,7 @@ void flush_kernel_vmap_range(void *vaddr, int size);
 void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
 	flush_icache_mm(vma->vm_mm, 0)
 
 #ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end)	flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end)		flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)
 #endif
 
 #ifndef CONFIG_SMP
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -41,6 +41,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
 void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -66,6 +66,11 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	local_flush_tlb_range_asid(start, end, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
 	local_flush_tlb_all();
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -90,6 +90,7 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 			unsigned long len);
 
 #define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -48,6 +48,7 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
 /* When a context switch happens we must flush all user windows so that
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -75,6 +75,7 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 #define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -116,8 +116,9 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_mm(mm)		flush_cache_all()
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
 
-#define flush_cache_vmap(start,end)	flush_cache_all()
-#define flush_cache_vunmap(start,end)	flush_cache_all()
+#define flush_cache_vmap(start,end)		flush_cache_all()
+#define flush_cache_vmap_early(start,end)	do { } while (0)
+#define flush_cache_vunmap(start,end)		flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio
@@ -140,6 +141,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_dup_mm(mm)			do { } while (0)
 
 #define flush_cache_vmap(start,end)		do { } while (0)
+#define flush_cache_vmap_early(start,end)	do { } while (0)
 #define flush_cache_vunmap(start,end)		do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 #ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -3333,13 +3333,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
 		if (rc < 0)
 			panic("failed to map percpu area, err=%d\n", rc);
 
-		/*
-		 * FIXME: Archs with virtual cache should flush local
-		 * cache for the linear mapping here - something
-		 * equivalent to flush_cache_vmap() on the local cpu.
-		 * flush_cache_vmap() can't be used as most supporting
-		 * data structures are not set up yet.
-		 */
+		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
 
 		/* copy static data */
 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
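For reference, an architecture whose uarchs can cache stale or invalid translations would opt in the same way riscv does above: define the macro in its own asm/cacheflush.h so the #ifndef fallback in include/asm-generic/cacheflush.h never kicks in. A minimal sketch for a hypothetical architecture that already provides a local kernel-range TLB flush:

/* arch/<arch>/include/asm/cacheflush.h -- hypothetical example */
#define flush_cache_vmap_early(start, end) \
	local_flush_tlb_kernel_range(start, end)

Because the generic version is guarded by #ifndef flush_cache_vmap_early, the architecture's definition simply takes precedence; every architecture that does not care keeps the no-op.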