mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-29 17:23:36 +00:00
a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings. Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and this is defined in <asm/page.h>, so this must be included *before* <asm/memory.h>. The use of macros was obscuring the unclear inclusion order here, as the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros. The naive solution to include <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach to always unconditionally include <asm/page.h> at the end of <asm/memory.h>. arch/arm uses <asm/memory.h> explicitly in a lot of places, however it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly. Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/ Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
106 lines
2.8 KiB
C
106 lines
2.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Hibernation support specific for ARM
|
|
*
|
|
* Derived from work on ARM hibernation support by:
|
|
*
|
|
* Ubuntu project, hibernation support for mach-dove
|
|
* Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
|
|
* Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
|
|
* https://lkml.org/lkml/2010/6/18/4
|
|
* https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
|
|
* https://patchwork.kernel.org/patch/96442/
|
|
*
|
|
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/suspend.h>
|
|
#include <asm/system_misc.h>
|
|
#include <asm/idmap.h>
|
|
#include <asm/suspend.h>
|
|
#include <asm/page.h>
|
|
#include <asm/sections.h>
|
|
#include "reboot.h"
|
|
|
|
int pfn_is_nosave(unsigned long pfn)
|
|
{
|
|
unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
|
|
unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
|
|
|
|
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
|
|
}
|
|
|
|
/*
 * Hook run before the hibernation snapshot is taken.  Warns if more
 * than one CPU is still online (the snapshot path here assumes a
 * single running CPU) and masks FIQs so they cannot fire while the
 * image is captured.
 */
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
	local_fiq_disable();
}
|
|
|
|
/*
 * Counterpart of save_processor_state(): re-enable FIQs once the
 * processor state is back in place.
 */
void notrace restore_processor_state(void)
{
	local_fiq_enable();
}
|
|
|
|
/*
|
|
* Snapshot kernel memory and reset the system.
|
|
*
|
|
* swsusp_save() is executed in the suspend finisher so that the CPU
|
|
* context pointer and memory are part of the saved image, which is
|
|
* required by the resume kernel image to restart execution from
|
|
* swsusp_arch_suspend().
|
|
*
|
|
* soft_restart is not technically needed, but is used to get success
|
|
* returned from cpu_suspend.
|
|
*
|
|
* When soft reboot completes, the hibernation snapshot is written out.
|
|
*/
|
|
static int notrace arch_save_image(unsigned long unused)
|
|
{
|
|
int ret;
|
|
|
|
ret = swsusp_save();
|
|
if (ret == 0)
|
|
_soft_restart(virt_to_idmap(cpu_resume), false);
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Save the current CPU state before suspend / poweroff.
 *
 * cpu_suspend() saves the CPU context and then runs arch_save_image()
 * as its finisher; the finisher's return value is what the hibernation
 * core sees here.
 */
int notrace swsusp_arch_suspend(void)
{
	return cpu_suspend(0, arch_save_image);
}
|
|
|
|
/*
 * Restore page contents for physical pages that were in use during loading
 * hibernation image. Switch to idmap_pgd so the physical page tables
 * are overwritten with the same contents.
 */
static void notrace arch_restore_image(void *unused)
{
	struct pbe *pbe;

	/*
	 * Run on the identity map: the pages about to be overwritten may
	 * include the current page tables themselves.
	 */
	cpu_switch_mm(idmap_pgd, &init_mm);
	/* Copy every saved page back to its original physical location. */
	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		copy_page(pbe->orig_address, pbe->address);

	/* Restart through the identity-mapped cpu_resume; does not return. */
	_soft_restart(virt_to_idmap(cpu_resume), false);
}
|
|
|
|
/* Temporary stack in the nosave region; used by swsusp_arch_resume(). */
static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
|
|
|
|
/*
|
|
* Resume from the hibernation image.
|
|
* Due to the kernel heap / data restore, stack contents change underneath
|
|
* and that would make function calls impossible; switch to a temporary
|
|
* stack within the nosave region to avoid that problem.
|
|
*/
|
|
int swsusp_arch_resume(void)
|
|
{
|
|
call_with_stack(arch_restore_image, 0,
|
|
resume_stack + ARRAY_SIZE(resume_stack));
|
|
return 0;
|
|
}
|