Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so that header must be included *before* <asm/memory.h>.

The use of macros was obscuring the unclear inclusion order here: the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/page.h> sometimes includes <asm/memory.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/memory.h> at the end of <asm/page.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions.

Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
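For illustration, the conversion has this general shape. This is a sketch reconstructed from the description above, not the verbatim diff; the exact formula and the guard macro name are assumptions about arch/arm's headers:

/*
 * Before: a polymorphic macro that silently accepts (void *),
 * (uintptr_t), (unsigned long) and friends.
 */
#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

/*
 * After: a static inline with a strongly typed argument. Passing
 * anything but a pointer now warns, and PAGE_SHIFT/PHYS_PFN_OFFSET
 * must already be resolved when this is parsed, hence the
 * <asm/page.h>-before-<asm/memory.h> ordering discussed above.
 */
static inline unsigned long virt_to_pfn(const void *p)
{
	unsigned long kaddr = (unsigned long)p;

	return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET);
}

The guard mentioned in the last paragraph can then be a plain preprocessor check at the top of <asm/memory.h> (again an assumed idiom, not necessarily the verbatim change): since <asm/page.h> defines its own guard macro before pulling in <asm/memory.h> at its end, only a direct inclusion trips the error:

#ifndef _ASMARM_PAGE_H
#error "Do not include <asm/memory.h> directly, include <asm/page.h> instead"
#endif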
59 lines · 1.4 KiB · C
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/dma.h>

#include "mm.h"

static inline bool __virt_addr_valid(unsigned long x)
{
	/*
	 * high_memory does not get immediately defined, and there
	 * are early callers of __pa() against PAGE_OFFSET
	 */
	if (!high_memory && x >= PAGE_OFFSET)
		return true;

	if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
		return true;

	/*
	 * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
	 * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS)
	 * that we just need to work around it and always return true.
	 */
	if (x == MAX_DMA_ADDRESS)
		return true;

	return false;
}

phys_addr_t __virt_to_phys(unsigned long x)
{
	WARN(!__virt_addr_valid(x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x, (void *)x);

	return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/* This is bounds checking against the kernel image only.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
		       x > (unsigned long)KERNEL_END);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
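
As a usage sketch of the helpers above (assuming CONFIG_DEBUG_VIRTUAL is enabled, so that virt_to_phys() routes through the out-of-line __virt_to_phys() in this file; the demo function below is hypothetical, not part of the file):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical demonstration helper */
static void phys_lookup_demo(void)
{
	void *lin = kmalloc(64, GFP_KERNEL);	/* linear-map address */
	void *vml = vmalloc(PAGE_SIZE);		/* non-linear address */

	if (lin)
		/* Valid: between PAGE_OFFSET and high_memory, no warning */
		(void)virt_to_phys(lin);

	if (vml)
		/* Trips the WARN in __virt_to_phys(): vmalloc is non-linear */
		(void)virt_to_phys(vml);

	kfree(lin);
	vfree(vml);
}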