mm: rationalise flush_icache_pages() and flush_icache_page()
Move the default (no-op) implementation of flush_icache_pages() to <linux/cacheflush.h> from <asm-generic/cacheflush.h>. Remove the flush_icache_page() wrapper from each architecture and define it once in <linux/cacheflush.h>.

Link: https://lkml.kernel.org/r/20230802151406.3735276-32-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 203b7b6aad
parent 29269ad90b
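As context for the diff below, here is a small, self-contained sketch of the pattern this change converges on (not kernel code: the struct stubs and the main() driver are added here only so the snippet compiles standalone). <linux/cacheflush.h> supplies a no-op flush_icache_pages() unless the architecture has already defined the name, and flush_icache_page() becomes a single wrapper that passes nr = 1; compare the final hunk of the diff.

#include <stdio.h>

struct vm_area_struct { int dummy; };	/* stand-in stub for illustration */
struct page { int dummy; };		/* stand-in stub for illustration */

/*
 * Generic defaults, mirroring the <linux/cacheflush.h> hunk at the end of
 * this diff: a no-op flush_icache_pages() unless the architecture already
 * defined the name, plus the single remaining flush_icache_page() wrapper.
 */
#ifndef flush_icache_pages
static inline void flush_icache_pages(struct vm_area_struct *vma,
				      struct page *page, unsigned int nr)
{
	/* default: nothing to do */
	(void)vma;
	(void)page;
	(void)nr;
}
#endif

/* One page is just the nr == 1 case. */
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)

int main(void)
{
	struct vm_area_struct vma = { 0 };
	struct page pg = { 0 };

	flush_icache_page(&vma, &pg);	/* expands to flush_icache_pages(&vma, &pg, 1) */
	printf("flush_icache_page() used the generic no-op\n");
	return 0;
}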
@@ -53,10 +53,6 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
 #define flush_icache_user_page flush_icache_user_page
 #endif /* CONFIG_SMP */
 
-/* This is used only in __do_fault and do_swap_page. */
-#define flush_icache_page(vma, page) \
-	flush_icache_user_page((vma), (page), 0, 0)
-
 /*
  * Both implementations of flush_icache_user_page flush the entire
  * address space, so one call, no matter how many pages.
@@ -66,6 +62,7 @@ static inline void flush_icache_pages(struct vm_area_struct *vma,
 {
 	flush_icache_user_page(vma, page, 0, 0);
 }
+#define flush_icache_pages flush_icache_pages
 
 #include <asm-generic/cacheflush.h>
 
@@ -18,15 +18,6 @@
 #include <linux/mm.h>
 #include <asm/shmparam.h>
 
-/*
- * Semantically we need this because icache doesn't snoop dcache/dma.
- * However ARC Cache flush requires paddr as well as vaddr, latter not available
- * in the flush_icache_page() API. So we no-op it but do the equivalent work
- * in update_mmu_cache()
- */
-#define flush_icache_page(vma, page)
-#define flush_icache_pages(vma, page, nr)
-
 void flush_cache_all(void);
 
 void flush_icache_range(unsigned long kstart, unsigned long kend);
@@ -321,13 +321,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
 
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
-#define flush_icache_pages(vma, page, nr) do { } while (0)
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
@@ -45,7 +45,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 #define flush_cache_vmap(start, end) cache_wbinv_all()
 #define flush_cache_vunmap(start, end) cache_wbinv_all()
 
-#define flush_icache_page(vma, page) do {} while (0);
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
 #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
 #define flush_icache_deferred(mm) do {} while (0);
@@ -33,7 +33,6 @@ static inline void flush_dcache_page(struct page *page)
 
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
 
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
 
@@ -18,7 +18,7 @@
  * - flush_cache_range(vma, start, end) flushes a range of pages
  * - flush_icache_range(start, end) flush a range of instructions
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ * - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
  *
  * Need to doublecheck which one is really needed for ptrace stuff to work.
  */
@@ -46,8 +46,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_icache_pages(vma, page) do { } while (0)
 #define flush_icache_user_page(vma, page, addr, len) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
@@ -261,7 +261,6 @@ static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_pages(vma, page, nr) \
 	__flush_pages_to_ram(page_address(page), nr)
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
 
 extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 	unsigned long addr, int len);
@@ -82,12 +82,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	__flush_anon_page(page, vmaddr);
 }
 
-static inline void flush_icache_pages(struct vm_area_struct *vma,
-	struct page *page, unsigned int nr)
-{
-}
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
-
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*__flush_icache_user_range)(unsigned long start,
@@ -35,7 +35,7 @@ void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 	unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1);
+#define flush_icache_pages flush_icache_pages
 
 #define flush_cache_vmap(start, end) flush_dcache_range(start, end)
 #define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
@@ -60,7 +60,7 @@ static inline void flush_dcache_page(struct page *page)
 
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 	unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+#define flush_icache_pages flush_icache_pages
 
 #define flush_icache_range(s,e) do { \
 	flush_kernel_dcache_range_asm(s,e); \
@@ -53,7 +53,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 	unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+#define flush_icache_pages flush_icache_pages
 extern void flush_cache_sigtramp(unsigned long address);
 
 struct flusher_data {
@@ -16,8 +16,6 @@
 #define flush_cache_page(vma,addr,pfn) \
 	sparc32_cachetlb_ops->cache_page(vma, addr)
 #define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_pages(vma, pg, nr) do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
@@ -53,9 +53,6 @@ static inline void flush_dcache_page(struct page *page)
 	flush_dcache_folio(page_folio(page));
 }
 
-#define flush_icache_page(vma, pg) do { } while(0)
-#define flush_icache_pages(vma, pg, nr) do { } while(0)
-
 void flush_ptrace_access(struct vm_area_struct *, struct page *,
 	unsigned long uaddr, void *kaddr,
 	unsigned long len, int write);
@@ -160,10 +160,6 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 	__invalidate_icache_range(start,(end) - (start)); \
 } while (0)
 
-/* This is not required, see Documentation/core-api/cachetlb.rst */
-#define flush_icache_page(vma,page) do { } while (0)
-#define flush_icache_pages(vma, page, nr) do { } while (0)
-
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 
@@ -77,18 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
 #define flush_icache_user_range flush_icache_range
 #endif
 
-#ifndef flush_icache_page
-static inline void flush_icache_pages(struct vm_area_struct *vma,
-	struct page *page, unsigned int nr)
-{
-}
-
-static inline void flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-}
-#endif
-
 #ifndef flush_icache_user_page
 static inline void flush_icache_user_page(struct vm_area_struct *vma,
 	struct page *page,
@@ -17,4 +17,13 @@ static inline void flush_dcache_folio(struct folio *folio)
 #define flush_dcache_folio flush_dcache_folio
 #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
 
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+	struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
 #endif /* _LINUX_CACHEFLUSH_H */
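For completeness, a hedged sketch of the opt-out path that the per-architecture hunks above follow (again with stand-in types and a main() driver added purely for illustration): an architecture supplies its own flush_icache_pages() and announces it with "#define flush_icache_pages flush_icache_pages", so the generic #ifndef default is skipped while flush_icache_page() still resolves to the architecture's implementation.

#include <stdio.h>

struct vm_area_struct { int dummy; };	/* stand-in stub for illustration */
struct page { int dummy; };		/* stand-in stub for illustration */

/* "Architecture" side: a real implementation ... */
static inline void flush_icache_pages(struct vm_area_struct *vma,
				      struct page *page, unsigned int nr)
{
	(void)vma;
	(void)page;
	printf("arch flush of %u page(s)\n", nr);
}
/* ... plus the announcement that suppresses the generic default below. */
#define flush_icache_pages flush_icache_pages

/* Generic side, as in <linux/cacheflush.h> after this change. */
#ifndef flush_icache_pages
static inline void flush_icache_pages(struct vm_area_struct *vma,
				      struct page *page, unsigned int nr)
{
}
#endif

#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)

int main(void)
{
	struct vm_area_struct vma = { 0 };
	struct page pg = { 0 };

	/* Resolves to the architecture's flush_icache_pages() with nr == 1. */
	flush_icache_page(&vma, &pg);
	return 0;
}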