powerpc updates for 6.9 #2

 - Handle errors in mark_rodata_ro() and mark_initmem_nx().

 - Make struct crash_mem available without CONFIG_CRASH_DUMP.
 
 Thanks to: Christophe Leroy, Hari Bathini.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAmX+HgQTHG1wZUBlbGxl
 cm1hbi5pZC5hdQAKCRBR6+o8yOGlgEGiEACC18EO6YUDVCblbRQsJLgZUZWdskqF
 0TTgrqhemXS1m7yucqp4HwM5N9YV7d3nXtaBTd0nxwrHRCpIef3XDFUyiLpFWbDQ
 pPt9AyUhTkkdH/6JQlYTC9S0/l9xAYPo15As8ZUnTPUNJk2pV6NxSpjYqZuQcmFm
 W5ln2rYDtG57XDI0WMMo/CSZ50YRgWSpINavQLUxn6MxTda2ZrXF39HaKi0FS1u+
 64bTT3uwKHvVWcf4/+KTiY3EOUgbgNeedZ7PjVGOx6VpQvig96/qtbu8zCfqMX0v
 eTz+p6IDtLTeXSu8Ak5YN5wBBNA9EsnR0osk/T99Ru48EEErkTl4+G6bkXlgRu7A
 KRbbyI66JZHXauwaKIwuhOswoYtEDrCrOadYMlYGCqjUsDs9zmxTUWamcZBSow60
 5S/Oo5SgOES+5P/p889hwW8XodC9uzLltMw7M+R36nRDPh9nbwu93Y1Kx2LQiery
 sxnXu+Pg7DTLq4zH75WggwpOH6nPoAP0NYT3QXnbgz8CFArePE7h1OfMzy26Xj1X
 C7wh1npAksdPo/00t2VEO7V38vq8nm41JqQ5lbPsRtoUuwFVqMp7+Agmq+eK4J/B
 zVEroBLIlGYexV5OLu824wIAVhT/PrYm9ONaTi7O7DFZrOFsnNyrRw1fifr7afgu
 xECv0qJ+5I9uAw==
 =edty
 -----END PGP SIGNATURE-----

Merge tag 'powerpc-6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull more powerpc updates from Michael Ellerman:

 - Handle errors in mark_rodata_ro() and mark_initmem_nx()

 - Make struct crash_mem available without CONFIG_CRASH_DUMP

Thanks to Christophe Leroy and Hari Bathini.

* tag 'powerpc-6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/kdump: Split KEXEC_CORE and CRASH_DUMP dependency
  powerpc/kexec: split CONFIG_KEXEC_FILE and CONFIG_CRASH_DUMP
  kexec/kdump: make struct crash_mem available without CONFIG_CRASH_DUMP
  powerpc: Handle error in mark_rodata_ro() and mark_initmem_nx()
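
The last commit in that list converts the mark_rodata_ro()/mark_initmem_nx() paths from void functions that could fail silently into int-returning helpers whose callers panic on failure (see the mark_rodata_ro() hunks near the end of the diff). A minimal userspace sketch of that error-propagation pattern follows; the helpers here are simplified stand-ins (the real kernel functions, such as mmu_mark_rodata_ro() and set_memory_ro(), take address and page-count arguments):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the MMU helpers that the series converts
 * from void to int. */
static int mmu_mark_rodata_ro(void) { return 0; }
static int set_memory_ro(void)      { return 0; }

/* New shape of the code: the worker returns the first error it sees... */
static int __mark_rodata_ro(void)
{
	int err = mmu_mark_rodata_ro();

	if (err)
		return err;
	return set_memory_ro();
}

/* ...and the entry point turns any error into a hard stop instead of
 * silently leaving kernel text writable. The kernel panics here; a
 * userspace sketch can only abort. */
static void mark_rodata_ro(void)
{
	int err = __mark_rodata_ro();

	if (err) {
		fprintf(stderr, "mark_rodata_ro() failed, err = %d\n", err);
		abort();
	}
}

int main(void)
{
	mark_rodata_ro();
	puts("rodata protected");
	return 0;
}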
Linus Torvalds 2024-03-23 09:21:26 -07:00
commit 484193fecd
16 changed files with 274 additions and 231 deletions
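
Many of the hunks that follow replace #ifdef CONFIG_KEXEC_CORE blocks with CONFIG_CRASH_DUMP checks, or fold the crash-only paths into ordinary C conditions guarded by IS_ENABLED(CONFIG_CRASH_DUMP), which keeps the code compiled and type-checked while letting the compiler drop it when the option is off. A small sketch of that idiom, with a faked macro since the real IS_ENABLED() lives in the kernel's include/linux/kconfig.h:

#include <stdio.h>

/* Fake IS_ENABLED(): in the kernel the macro expands to 1 or 0 based on
 * the Kconfig symbol; here we hard-code the value for illustration. */
#define CONFIG_CRASH_DUMP_ENABLED 0
#define IS_ENABLED(x) (x)

enum kexec_type { KEXEC_TYPE_DEFAULT, KEXEC_TYPE_CRASH };

static int load_crashdump_segments(void)
{
	puts("loading crashdump segments");
	return 0;
}

static int load_image(enum kexec_type type)
{
	/* Same shape as the elf64_load() hunks below: the crash-only branch
	 * still compiles, but is dead code when the option is off, so the
	 * optimizer removes it without any #ifdef. */
	if (IS_ENABLED(CONFIG_CRASH_DUMP_ENABLED) && type == KEXEC_TYPE_CRASH)
		return load_crashdump_segments();
	return 0;
}

int main(void)
{
	return load_image(KEXEC_TYPE_CRASH);
}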


@@ -607,11 +607,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config ARCH_SUPPORTS_KEXEC
def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP)
config ARCH_SELECTS_KEXEC
def_bool y
depends on KEXEC
select CRASH_DUMP
config ARCH_SUPPORTS_KEXEC_FILE
def_bool PPC64
@@ -622,7 +617,6 @@ config ARCH_SELECTS_KEXEC_FILE
def_bool y
depends on KEXEC_FILE
select KEXEC_ELF
select CRASH_DUMP
select HAVE_IMA_KEXEC if IMA
config PPC64_BIG_ENDIAN_ELF_ABI_V2
@@ -694,8 +688,7 @@ config ARCH_SELECTS_CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
depends on PPC64 && (PPC_RTAS || PPC_POWERNV)
select CRASH_DUMP
depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,


@@ -55,59 +55,18 @@
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC_CORE
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
ppc_save_regs(newregs);
}
struct kimage;
struct pt_regs;
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *);
extern int crash_wake_offline;
struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
void kexec_copy_flush(struct kimage *image);
#if defined(CONFIG_CRASH_DUMP)
bool is_kdump_kernel(void);
#define is_kdump_kernel is_kdump_kernel
#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif /* CONFIG_PPC_RTAS */
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
@@ -152,15 +111,56 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
#endif /* CONFIG_KEXEC_FILE */
#else /* !CONFIG_KEXEC_CORE */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
#endif /* CONFIG_KEXEC_CORE */
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
#ifdef CONFIG_CRASH_RESERVE
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
#else
static inline void reserve_crashkernel(void) {}
static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
#endif
#if defined(CONFIG_CRASH_DUMP)
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
return 0;
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
ppc_save_regs(newregs);
}
static inline void reserve_crashkernel(void) { ; }
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *regs);
extern int crash_wake_offline;
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
bool is_kdump_kernel(void);
#define is_kdump_kernel is_kdump_kernel
#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif /* CONFIG_PPC_RTAS */
#else /* !CONFIG_CRASH_DUMP */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int crash_shutdown_register(crash_shutdown_t handler)
{
@@ -183,7 +183,7 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>


@@ -475,7 +475,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
tce_alloc_end = *lprop;
#endif
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_RESERVE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;


@@ -110,7 +110,7 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
#ifdef CONFIG_VMCORE_INFO
#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif


@@ -588,7 +588,7 @@ void smp_send_debugger_break(void)
}
#endif
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
int cpu;
@@ -631,7 +631,7 @@ void crash_smp_send_stop(void)
stopped = true;
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
if (kexec_crash_image) {
crash_kexec_prepare();
return;


@@ -3,12 +3,13 @@
# Makefile for the linux kernel.
#
obj-y += core.o crash.o core_$(BITS).o
obj-y += core.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_CRASH_DUMP) += crash.o
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n


@@ -44,10 +44,12 @@ void machine_kexec_mask_interrupts(void) {
}
}
#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
}
#endif
void machine_kexec_cleanup(struct kimage *image)
{
@@ -77,6 +79,7 @@ void machine_kexec(struct kimage *image)
for(;;);
}
#ifdef CONFIG_CRASH_RESERVE
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem_sz;
@@ -251,3 +254,4 @@ static int __init kexec_setup(void)
return 0;
}
late_initcall(kexec_setup);
#endif /* CONFIG_CRASH_RESERVE */


@@ -47,7 +47,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
return ERR_PTR(ret);
if (image->type == KEXEC_TYPE_CRASH) {
if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
/* min & max buffer values for kdump case */
kbuf.buf_min = pbuf.buf_min = crashk_res.start;
kbuf.buf_max = pbuf.buf_max =
@@ -70,7 +70,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem);
/* Load additional segments needed for panic kernel */
if (image->type == KEXEC_TYPE_CRASH) {
if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
ret = load_crashdump_segments_ppc64(image, &kbuf);
if (ret) {
pr_err("Failed to load kdump kernel segments\n");


@@ -96,119 +96,6 @@ static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
return ret;
}
/**
* get_usable_memory_ranges - Get usable memory ranges. This list includes
* regions like crashkernel, opal/rtas & tce-table,
* that kdump kernel could use.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
int ret;
/*
* Early boot failure observed on guests when low memory (first memory
* block?) is not added to usable memory. So, add [0, crashk_res.end]
* instead of [crashk_res.start, crashk_res.end] to workaround it.
* Also, crashed kernel's memory must be added to reserve map to
* avoid kdump kernel from using it.
*/
ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
if (ret)
goto out;
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_tce_mem_ranges(mem_ranges);
out:
if (ret)
pr_err("Failed to setup usable memory ranges\n");
return ret;
}
/**
* get_crash_memory_ranges - Get crash memory ranges. This list includes
* first/crashing kernel's memory regions that
* would be exported via an elfcore.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
phys_addr_t base, end;
struct crash_mem *tmem;
u64 i;
int ret;
for_each_mem_range(i, &base, &end) {
u64 size = end - base;
/* Skip backup memory region, which needs a separate entry */
if (base == BACKUP_SRC_START) {
if (size > BACKUP_SRC_SIZE) {
base = BACKUP_SRC_END + 1;
size -= BACKUP_SRC_SIZE;
} else
continue;
}
ret = add_mem_range(mem_ranges, base, size);
if (ret)
goto out;
/* Try merging adjacent ranges before reallocation attempt */
if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
sort_memory_ranges(*mem_ranges, true);
}
/* Reallocate memory ranges if there is no space to split ranges */
tmem = *mem_ranges;
if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
tmem = realloc_mem_ranges(mem_ranges);
if (!tmem)
goto out;
}
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
if (ret)
goto out;
/*
* FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
* regions are exported to save their context at the time of
* crash, they should actually be backed up just like the
* first 64K bytes of memory.
*/
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
/* create a separate program header for the backup region */
ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
if (ret)
goto out;
sort_memory_ranges(*mem_ranges, false);
out:
if (ret)
pr_err("Failed to setup crash memory ranges\n");
return ret;
}
/**
* get_reserved_memory_ranges - Get reserve memory ranges. This list includes
* memory regions that should be added to the
@@ -434,6 +321,120 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
return ret;
}
#ifdef CONFIG_CRASH_DUMP
/**
* get_usable_memory_ranges - Get usable memory ranges. This list includes
* regions like crashkernel, opal/rtas & tce-table,
* that kdump kernel could use.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
int ret;
/*
* Early boot failure observed on guests when low memory (first memory
* block?) is not added to usable memory. So, add [0, crashk_res.end]
* instead of [crashk_res.start, crashk_res.end] to workaround it.
* Also, crashed kernel's memory must be added to reserve map to
* avoid kdump kernel from using it.
*/
ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
if (ret)
goto out;
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_tce_mem_ranges(mem_ranges);
out:
if (ret)
pr_err("Failed to setup usable memory ranges\n");
return ret;
}
/**
* get_crash_memory_ranges - Get crash memory ranges. This list includes
* first/crashing kernel's memory regions that
* would be exported via an elfcore.
* @mem_ranges: Range list to add the memory ranges to.
*
* Returns 0 on success, negative errno on error.
*/
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
phys_addr_t base, end;
struct crash_mem *tmem;
u64 i;
int ret;
for_each_mem_range(i, &base, &end) {
u64 size = end - base;
/* Skip backup memory region, which needs a separate entry */
if (base == BACKUP_SRC_START) {
if (size > BACKUP_SRC_SIZE) {
base = BACKUP_SRC_END + 1;
size -= BACKUP_SRC_SIZE;
} else
continue;
}
ret = add_mem_range(mem_ranges, base, size);
if (ret)
goto out;
/* Try merging adjacent ranges before reallocation attempt */
if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
sort_memory_ranges(*mem_ranges, true);
}
/* Reallocate memory ranges if there is no space to split ranges */
tmem = *mem_ranges;
if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
tmem = realloc_mem_ranges(mem_ranges);
if (!tmem)
goto out;
}
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
if (ret)
goto out;
/*
* FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
* regions are exported to save their context at the time of
* crash, they should actually be backed up just like the
* first 64K bytes of memory.
*/
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
ret = add_opal_mem_range(mem_ranges);
if (ret)
goto out;
/* create a separate program header for the backup region */
ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
if (ret)
goto out;
sort_memory_ranges(*mem_ranges, false);
out:
if (ret)
pr_err("Failed to setup crash memory ranges\n");
return ret;
}
/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
@@ -863,6 +864,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
return 0;
}
#endif
/**
* setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
@@ -972,26 +974,14 @@ static unsigned int cpu_node_size(void)
return size;
}
/**
* kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
* setup FDT for kexec/kdump kernel.
* @image: kexec image being loaded.
*
* Returns the estimated extra size needed for kexec/kdump kernel FDT.
*/
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
{
unsigned int cpu_nodes, extra_size = 0;
struct device_node *dn;
u64 usm_entries;
// Budget some space for the password blob. There's already extra space
// for the key name
if (plpks_is_available())
extra_size += (unsigned int)plpks_get_passwordlen();
if (image->type != KEXEC_TYPE_CRASH)
return extra_size;
if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
return 0;
/*
* For kdump kernel, account for linux,usable-memory and
@@ -1019,6 +1009,25 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
return extra_size;
}
/**
* kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
* setup FDT for kexec/kdump kernel.
* @image: kexec image being loaded.
*
* Returns the estimated extra size needed for kexec/kdump kernel FDT.
*/
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
unsigned int extra_size = 0;
// Budget some space for the password blob. There's already extra space
// for the key name
if (plpks_is_available())
extra_size += (unsigned int)plpks_get_passwordlen();
return extra_size + kdump_extra_fdt_size_ppc64(image);
}
/**
* add_node_props - Reads node properties from device node structure and add
* them to fdt.
@@ -1171,6 +1180,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
struct crash_mem *umem = NULL, *rmem = NULL;
int i, nr_ranges, ret;
#ifdef CONFIG_CRASH_DUMP
/*
* Restrict memory usage for kdump kernel by setting up
* usable memory ranges and memory reserve map.
@@ -1207,6 +1217,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
goto out;
}
}
#endif
/* Update cpus nodes information to account hotplug CPUs. */
ret = update_cpus_node(fdt);
@@ -1278,7 +1289,7 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
buf_min = kbuf->buf_min;
buf_max = kbuf->buf_max;
/* Segments for kdump kernel should be within crashkernel region */
if (kbuf->image->type == KEXEC_TYPE_CRASH) {
if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
buf_min = (buf_min < crashk_res.start ?
crashk_res.start : buf_min);
buf_max = (buf_max > crashk_res.end ?


@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr)
return true;
}
void mmu_mark_initmem_nx(void)
int mmu_mark_initmem_nx(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void)
mtsr(mfsr(i << 28) | 0x10000000, i << 28);
}
return 0;
}
void mmu_mark_rodata_ro(void)
int mmu_mark_rodata_ro(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void)
}
update_bats();
return 0;
}
/*


@@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
int mmu_mark_initmem_nx(void);
int mmu_mark_rodata_ro(void);
#else
static inline void mmu_mark_initmem_nx(void) { }
static inline void mmu_mark_rodata_ro(void) { }
static inline int mmu_mark_initmem_nx(void) { return 0; }
static inline int mmu_mark_rodata_ro(void) { return 0; }
#endif
#ifdef CONFIG_PPC_8xx


@@ -119,23 +119,26 @@
PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
pgprot_t prot, bool new)
static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
pgprot_t prot, bool new)
{
unsigned long v = PAGE_OFFSET + offset;
unsigned long p = offset;
int err = 0;
WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
if (!new)
flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
return err;
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return top;
}
void mmu_mark_initmem_nx(void)
int mmu_mark_initmem_nx(void)
{
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext);
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
int err = 0;
if (!debug_pagealloc_enabled_or_kfence())
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false);
return err;
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
int mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
int err;
mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
mmu_pin_tlb(block_mapped_ram, true);
return err;
}
#endif


@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void)
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
int mmu_mark_rodata_ro(void)
{
unsigned long remapped;
remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
WARN_ON(__max_low_memory != remapped);
if (WARN_ON(__max_low_memory != remapped))
return -EINVAL;
return 0;
}
#endif
void mmu_mark_initmem_nx(void)
int mmu_mark_initmem_nx(void)
{
/* Everything is done in mmu_mark_rodata_ro() */
return 0;
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,


@@ -130,31 +130,41 @@ void __init mapin_ram(void)
}
}
void mark_initmem_nx(void)
static int __mark_initmem_nx(void)
{
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
int err;
mmu_mark_initmem_nx();
err = mmu_mark_initmem_nx();
if (!v_block_mapped((unsigned long)_sinittext)) {
set_memory_nx((unsigned long)_sinittext, numpages);
set_memory_rw((unsigned long)_sinittext, numpages);
err = set_memory_nx((unsigned long)_sinittext, numpages);
if (err)
return err;
err = set_memory_rw((unsigned long)_sinittext, numpages);
}
return err;
}
void mark_initmem_nx(void)
{
int err = __mark_initmem_nx();
if (err)
panic("%s() failed, err = %d\n", __func__, err);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
static int __mark_rodata_ro(void)
{
unsigned long numpages;
if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
if (v_block_mapped((unsigned long)_stext + 1)) {
mmu_mark_rodata_ro();
return;
}
if (v_block_mapped((unsigned long)_stext + 1))
return mmu_mark_rodata_ro();
/*
* mark text and rodata as read only. __end_rodata is set by
@@ -164,6 +174,14 @@ void mark_rodata_ro(void)
numpages = PFN_UP((unsigned long)__end_rodata) -
PFN_DOWN((unsigned long)_stext);
set_memory_ro((unsigned long)_stext, numpages);
return set_memory_ro((unsigned long)_stext, numpages);
}
void mark_rodata_ro(void)
{
int err = __mark_rodata_ro();
if (err)
panic("%s() failed, err = %d\n", __func__, err);
}
#endif


@@ -434,7 +434,7 @@ void __init pnv_smp_init(void)
smp_ops = &pnv_smp_ops;
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
crash_wake_offline = 1;
#endif
#endif


@@ -8,6 +8,12 @@
struct kimage;
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
struct range ranges[] __counted_by(max_nr_ranges);
};
#ifdef CONFIG_CRASH_DUMP
int crash_shrink_memory(unsigned long new_size);
@@ -51,12 +57,6 @@ static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
struct range ranges[] __counted_by(max_nr_ranges);
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart,
unsigned long long mend);
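
With struct crash_mem moved out of the CONFIG_CRASH_DUMP section (the hunk above), range bookkeeping built on it compiles regardless of that option. A hedged userspace sketch of how such a flexible-array range list is sized and filled; alloc_crash_mem() and add_range() are illustrative helpers written for this example, not kernel API, and the exclusion logic of crash_exclude_mem_range() is only paraphrased in the comments:

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long long start, end; };

/* Mirror of the struct the hunk moves; in the kernel the flexible array
 * is annotated __counted_by(max_nr_ranges). */
struct crash_mem {
	unsigned int max_nr_ranges;
	unsigned int nr_ranges;
	struct range ranges[];
};

/* Size the allocation for the header plus 'max' range slots. */
static struct crash_mem *alloc_crash_mem(unsigned int max)
{
	struct crash_mem *m = calloc(1, sizeof(*m) + max * sizeof(struct range));

	if (m)
		m->max_nr_ranges = max;
	return m;
}

static int add_range(struct crash_mem *m, unsigned long long s,
		     unsigned long long e)
{
	if (m->nr_ranges >= m->max_nr_ranges)
		return -1;
	m->ranges[m->nr_ranges++] = (struct range){ s, e };
	return 0;
}

int main(void)
{
	struct crash_mem *m = alloc_crash_mem(4);

	if (!m)
		return 1;
	/* In the kernel, crash_exclude_mem_range() would then punch the
	 * crashkernel window out of a list like this, possibly splitting
	 * one entry into two. */
	add_range(m, 0x0, 0x7fffffffULL);
	printf("nr_ranges = %u\n", m->nr_ranges);
	free(m);
	return 0;
}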