Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
ARM development updates for 6.1-rc1
- Print an un-hashed userspace PC on undefined instruction exception
- Disable FDPIC ABI
- Remove redundant vfp_flush/release_thread functions
- Use raw_cpu_* rather than this_cpu_* in handle_bad_stack()
- Avoid needlessly long backtraces when show_regs() is called
- Fix an issue with stack traces through call_with_stack()
- Avoid stack traces saving a duplicate exception PC value
- Pass a void pointer to virt_to_page() in DMA mapping code
- Fix kasan maps for modules when CONFIG_KASAN_VMALLOC=n
- Show FDT region and page table level names in kernel page tables dump

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmM8BzcACgkQ9OeQG+St
rGQVJA//REETiTYENYY+u0T14HdLeYBbfSu9pmhc8b12rsnPlgym/bKVwV9jC+pk
/FMNfArFwxHCVKooVvYwgyzpAXE4zLWbtZj8goH94Ce2JBrsgUCzaizgRCvna1f8
jSRg/krUnK0ZRK0VSuiHTsgjGToP5F8zGgJu2WaeN6qGnzRViTVA6FglJav4WkK0
302lKmHOhgUk7hvBf18b4MSl0ouFPQ37iDB7enzsIoBWtrhmVfX4+4bPjP6Bz8x1
54xCw2FD6RsIUTfqgNUZt3S3PO4Khs8m97sFnoPIZWt9LY7OxA8oqnPBEN4Wbm1a
vCoJjKspDTmDtdUIdUfFF4uKMEYlYiwsjS3trUxdNlqA7G81kSuy5QPoPfFue9nF
Q3tg8mjyGnoH6iavajVqLUYk7Kvwv5CA6j3EU0j2BxPwUwzumaAxsTmh3/0niCas
iuM838l5hKOHdyOPb25pUz4juroE9bKNZbeDitsJzZoT+Xh9C2dsGi4+ZHKVgAmi
I3HdIdXLIJl/1HH8vlM2m2w8Bcs8qrtkSy9g8kCuWHVkiakf5fWEnUhNbw4e2Hxt
onRNaCW7Wok3tWMF5p4KJVjXDhi/QrwztDftVhhmFmZCgK6LypS2MMp0uPULHlWO
SEbXKdu0iKb62K0nmt68IP+3gainL6xurFiOSVlkQRmWSE1NJb4=
=chYJ
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - Print an un-hashed userspace PC on undefined instruction exception
 - Disable FDPIC ABI
 - Remove redundant vfp_flush/release_thread functions
 - Use raw_cpu_* rather than this_cpu_* in handle_bad_stack()
 - Avoid needlessly long backtraces when show_regs() is called
 - Fix an issue with stack traces through call_with_stack()
 - Avoid stack traces saving a duplicate exception PC value
 - Pass a void pointer to virt_to_page() in DMA mapping code
 - Fix kasan maps for modules when CONFIG_KASAN_VMALLOC=n
 - Show FDT region and page table level names in kernel page tables dump

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9246/1: dump: show page table level name
  ARM: 9245/1: dump: show FDT region
  ARM: 9242/1: kasan: Only map modules if CONFIG_KASAN_VMALLOC=n
  ARM: 9240/1: dma-mapping: Pass (void *) to virt_to_page()
  ARM: 9234/1: stacktrace: Avoid duplicate saving of exception PC value
  ARM: 9233/1: stacktrace: Skip frame pointer boundary check for call_with_stack()
  ARM: 9224/1: Dump the stack traces based on the parameter 'regs' of show_regs()
  ARM: 9232/1: Replace this_cpu_* with raw_cpu_* in handle_bad_stack()
  ARM: 9228/1: vfp: kill vfp_flush/release_thread()
  ARM: 9226/1: disable FDPIC ABI
  ARM: 9221/1: traps: print un-hashed user pc on undefined instruction
commit 7782aae498
@@ -22,6 +22,9 @@ GZFLAGS :=-9
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)

# Disable FDPIC ABI
KBUILD_CFLAGS += $(call cc-option,-mno-fdpic)

# This should work on most of the modern platforms
KBUILD_DEFCONFIG := multi_v7_defconfig

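Illustration (not part of the commit): the hunk above routes -mno-fdpic through cc-option, so toolchains that default to the FDPIC ABI fall back to the standard EABI code model the kernel expects, while compilers that do not know the flag are left untouched. A minimal sketch of how the effect can be checked, assuming the usual convention that FDPIC-targeting GCC/Clang predefine __FDPIC__; the file name and the probe itself are illustrative, not kernel code.

/* fdpic_probe.c - illustrative user-space probe, not kernel code.
 * Compilers building for the FDPIC ABI are assumed to predefine
 * __FDPIC__, so this file compiles only when the standard ABI is in
 * effect (e.g. after -mno-fdpic has been applied).
 */
#ifdef __FDPIC__
#error "FDPIC ABI selected; the kernel is built for the standard EABI"
#endif

int main(void)
{
	return 0;
}

Building it with a toolchain that accepts -mno-fdpic should succeed; whether the flag is accepted at all is exactly the property cc-option tests for in the Makefile above.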
@@ -46,9 +46,6 @@ union vfp_state {
	struct vfp_hard_struct hard;
};

extern void vfp_flush_thread(union vfp_state *);
extern void vfp_release_thread(union vfp_state *);

#define FP_HARD_SIZE 35

struct fp_hard_struct {
@@ -21,6 +21,9 @@ struct stackframe {
	struct llist_node *kr_cur;
	struct task_struct *tsk;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
	bool ex_frame;
#endif
};

static __always_inline
@@ -34,6 +37,9 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
	frame->kr_cur = NULL;
	frame->tsk = current;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
	frame->ex_frame = in_entry_text(frame->pc);
#endif
}

extern int unwind_frame(struct stackframe *frame);
@@ -41,5 +47,7 @@ extern void walk_stackframe(struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

#endif	/* __ASM_STACKTRACE_H */
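Illustration (not part of the commit): the new ex_frame flag marks frames whose PC sits in exception entry code, telling the unwinder that a struct pt_regs lives on the stack at that point. A minimal sketch of a caller of this header's interface on a frame-pointer-unwinder kernel; demo_dump_from_regs() is an invented name, the real users are in the stacktrace and traps code changed later in this diff.

#include <linux/printk.h>
#include <asm/stacktrace.h>

/* Walk and print a backtrace starting from trapped registers. */
static void demo_dump_from_regs(struct pt_regs *regs)
{
	struct stackframe frame;

	/*
	 * Seeds fp/sp/lr/pc from regs; on CONFIG_UNWINDER_FRAME_POINTER
	 * kernels it now also sets frame.ex_frame when regs->ARM_pc is
	 * in entry code.
	 */
	arm_get_current_stackframe(regs, &frame);

	for (;;) {
		pr_info("  %pS\n", (void *)frame.pc);
		if (unwind_frame(&frame) < 0)
			break;
	}
}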
@@ -201,7 +201,7 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_stack();
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);
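Illustration (not part of the commit): show_regs() previously called dump_stack(), which unwinds from the show_regs() call site itself, so every report carried the extra frames between the exception handler and dump_stack(). Handing the trapped pt_regs to dump_backtrace() starts the trace at the context that actually faulted, which is the "needlessly long backtraces" fix named in the summary. A hedged before/after sketch; demo_report() is an invented wrapper.

#include <linux/printk.h>
#include <asm/stacktrace.h>

static void demo_report(struct pt_regs *regs)
{
	/*
	 * Old behaviour: unwind from *here*, so the trace begins with
	 * demo_report()/show_regs() and only later reaches the faulting
	 * context further down the stack.
	 */
	/* dump_stack(); */

	/*
	 * New behaviour: unwind from the trapped registers directly, so
	 * the first frames shown are the ones that actually trapped.
	 * KERN_DEFAULT matches the log level show_regs() already used.
	 */
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}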
@@ -47,6 +47,7 @@ void *return_address(unsigned int level)
	frame.kr_cur = NULL;
	frame.tsk = current;
#endif
	frame.ex_frame = false;

	walk_stackframe(&frame, save_return_addr, &data);

@@ -9,6 +9,8 @@
#include <asm/stacktrace.h>
#include <asm/traps.h>

#include "reboot.h"

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
@@ -39,29 +41,74 @@
 * Note that with framepointer enabled, even the leaf functions have the same
 * prologue and epilogue, therefore we can ignore the LR value in this case.
 */
int notrace unwind_frame(struct stackframe *frame)

extern unsigned long call_with_stack_end;

static int frame_pointer_check(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;
	unsigned long pc = frame->pc;

	/*
	 * call_with_stack() is the only place we allow SP to jump from one
	 * stack to another, with FP and SP pointing to different stacks,
	 * skipping the FP boundary check at this point.
	 */
	if (pc >= (unsigned long)&call_with_stack &&
	    pc < (unsigned long)&call_with_stack_end)
		return 0;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

#ifdef CONFIG_CC_IS_CLANG
	/* check current frame pointer is within bounds */
#ifdef CONFIG_CC_IS_CLANG
	if (fp < low + 4 || fp > high - 4)
		return -EINVAL;
#else
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;
#endif

	return 0;
}

int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	if (frame_pointer_check(frame))
		return -EINVAL;

	/*
	 * When we unwind through an exception stack, include the saved PC
	 * value into the stack trace.
	 */
	if (frame->ex_frame) {
		struct pt_regs *regs = (struct pt_regs *)frame->sp;

		/*
		 * We check that 'regs + sizeof(struct pt_regs)' (that is,
		 * &regs[1]) does not exceed the bottom of the stack to avoid
		 * accessing data outside the task's stack. This may happen
		 * when frame->ex_frame is a false positive.
		 */
		if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
			return -EINVAL;

		frame->pc = regs->ARM_pc;
		frame->ex_frame = false;
		return 0;
	}

	/* restore the registers from the stack frame */
#ifdef CONFIG_CC_IS_CLANG
	frame->sp = frame->fp;
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
#else
	/* check current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
	frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
@@ -72,6 +119,9 @@ int notrace unwind_frame(struct stackframe *frame)
					(void *)frame->fp, &frame->kr_cur);
#endif

	if (in_entry_text(frame->pc))
		frame->ex_frame = true;

	return 0;
}
#endif
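Illustration (not part of the commit): the fp-relative loads in unwind_frame() encode the frame layouts the two supported compilers emit with CONFIG_FRAME_POINTER, and those layouts are what frame_pointer_check() is bounding. A sketch of the assumed layouts, using the offsets as they appear above; this is a description for orientation, not authoritative ABI text, and the struct name is invented.

/*
 * GCC (APCS-style frame; prologue roughly:
 *     mov ip, sp; stmfd sp!, {fp, ip, lr, pc}; sub fp, ip, #4):
 *
 *     fp      -> saved pc
 *     fp - 4  -> saved lr   (loaded into frame->pc)
 *     fp - 8  -> saved ip   (caller's sp, loaded into frame->sp)
 *     fp - 12 -> saved fp   (loaded into frame->fp)
 *
 * Clang instead keeps a two-word frame record that fp points at:
 */
struct demo_clang_frame_record {	/* name invented for illustration */
	unsigned long caller_fp;	/* *(fp)     -> frame->fp */
	unsigned long return_addr;	/* *(fp + 4) -> frame->pc */
};
/*
 * The different offsets are why the bounds checks above use different
 * minimum distances from the bottom of the stack (4 bytes for Clang,
 * 12 for GCC) and why the loads diverge under CONFIG_CC_IS_CLANG.
 */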
@@ -102,7 +152,6 @@ static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	struct pt_regs *regs;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
@@ -113,19 +162,6 @@ static int save_trace(struct stackframe *frame, void *d)
	}

	trace->entries[trace->nr_entries++] = addr;

	if (trace->nr_entries >= trace->max_entries)
		return 1;

	if (!in_entry_text(frame->pc))
		return 0;

	regs = (struct pt_regs *)frame->sp;
	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
		return 0;

	trace->entries[trace->nr_entries++] = regs->ARM_pc;

	return trace->nr_entries >= trace->max_entries;
}
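Illustration (not part of the commit): both the old save_trace() logic removed here and the exception-frame handling now centralised in unwind_frame() rely on the same bound before treating frame->sp as a struct pt_regs: the whole register block, i.e. everything up to &regs[1], must still lie within the THREAD_SIZE-aligned stack. A small hedged restatement of that check; demo_pt_regs_fits() is an invented helper.

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/thread_info.h>	/* THREAD_SIZE */

/*
 * True if a struct pt_regs starting at sp fits entirely below the top
 * of the current THREAD_SIZE-aligned stack; &regs[1] in the code above
 * is exactly sp + sizeof(struct pt_regs).
 */
static bool demo_pt_regs_fits(unsigned long sp)
{
	return sp + sizeof(struct pt_regs) <= ALIGN(sp, THREAD_SIZE);
}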
@@ -167,6 +203,9 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
	frame.kr_cur = NULL;
	frame.tsk = tsk;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
	frame.ex_frame = false;
#endif

	walk_stackframe(&frame, save_trace, &data);
}
@@ -188,6 +227,9 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
	frame.kr_cur = NULL;
	frame.tsk = current;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
	frame.ex_frame = in_entry_text(frame.pc);
#endif

	walk_stackframe(&frame, save_trace, &data);
}

@@ -205,14 +205,14 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
				  const char *loglvl)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	unwind_backtrace(regs, tsk, loglvl);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	unsigned int fp, mode;
	int ok = 1;
@@ -487,7 +487,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
die_sig:
#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		pr_info("%s (%d): undefined instruction: pc=%p\n",
		pr_info("%s (%d): undefined instruction: pc=%px\n",
			current->comm, task_pid_nr(current), pc);
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
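Illustration (not part of the commit): %p in printk hashes pointer values so addresses do not leak into logs, while %px prints the raw value. The PC reported here is a user-space address the faulting process already knows, so hashing it only destroys debugging information, which is what ARM: 9221/1 addresses. A hedged kernel-context sketch; demo_report_pc() and the message text are invented.

#include <linux/printk.h>

static void demo_report_pc(void __user *pc)
{
	/* %p prints a per-boot hashed token, not the real address. */
	pr_info("hashed:   pc=%p\n", pc);

	/*
	 * %px prints the raw pointer; reserve it for values that are not
	 * sensitive, such as this already-user-visible userspace PC.
	 */
	pr_info("unhashed: pc=%px\n", pc);
}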
@@ -920,9 +920,9 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
#ifdef CONFIG_IRQSTACKS
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr);
#endif
	unsigned long ovf_stk = (unsigned long)this_cpu_read(overflow_stack_ptr);
	unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

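Illustration (not part of the commit): this_cpu_read() is the fully preemption-safe accessor and, on architectures such as ARM that fall back to the generic implementation, may wrap the access in preempt_disable()/preempt_enable(); raw_cpu_read() is the bare per-CPU load. handle_bad_stack() runs on an exception path where preemption is already impossible and the kernel is in serious trouble, so the raw access is both sufficient and the minimum amount of code. Hedged sketch; the per-CPU variable and function are invented.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_stack_ptr);	/* invented */

static unsigned long demo_read_on_exception_path(void)
{
	/*
	 * this_cpu_read(demo_stack_ptr) would be correct too, but may
	 * expand to preempt_disable()/read/preempt_enable() here; the
	 * raw accessor is a plain per-CPU load, appropriate when
	 * preemption is already off or impossible.
	 */
	return raw_cpu_read(demo_stack_ptr);
}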
@@ -46,4 +46,6 @@ UNWIND( .setfp fpreg, sp )
	pop	{fpreg, pc}
UNWIND( .fnend )
#endif
	.globl call_with_stack_end
call_with_stack_end:
ENDPROC(call_with_stack)

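Illustration (not part of the commit): the new global label gives C code an end marker for call_with_stack(), so a PC can be range-checked against the routine exactly as frame_pointer_check() does earlier in this diff. A minimal hedged sketch of that pattern; the prototype shown is the one the assembly routine is assumed to implement, and demo_pc_in_call_with_stack() is an invented name.

#include <linux/types.h>

/* Provided by the assembly above (presumably arch/arm/lib/call_with_stack.S). */
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
extern unsigned long call_with_stack_end;

static bool demo_pc_in_call_with_stack(unsigned long pc)
{
	return pc >= (unsigned long)&call_with_stack &&
	       pc <  (unsigned long)&call_with_stack_end;
}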
@@ -307,7 +307,7 @@ void __init dma_contiguous_remap(void)

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	struct page *page = virt_to_page((void *)addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);

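Illustration (not part of the commit): the callback receives the virtual address as an unsigned long, but virt_to_page() and the virt_to_pfn() helpers underneath it are being moved toward strict (void *) typing, so the value is cast back to a pointer before the conversion. Hedged sketch; demo_page_of() is an invented wrapper.

#include <linux/mm.h>

/* Convert a kernel virtual address held in an integer to its struct page. */
static struct page *demo_page_of(unsigned long addr)
{
	return virt_to_page((void *)addr);
}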
@@ -26,7 +26,7 @@ static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel Mapping" },
	{ 0,			"vmalloc() Area" },
	{ VMALLOC_END,		"vmalloc() End" },
	{ FDT_FIXED_BASE,	"FDT Area" },
	{ FIXADDR_START,	"Fixmap Area" },
	{ VECTORS_BASE,		"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2,	"Vectors End" },
@@ -200,6 +200,7 @@ static const struct prot_bits section_bits[] = {
};

struct pg_level {
	const char *name;
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
@@ -213,9 +214,11 @@ static struct pg_level pg_level[] = {
	}, { /* p4d */
	}, { /* pud */
	}, { /* pmd */
		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
		.bits	= section_bits,
		.num	= ARRAY_SIZE(section_bits),
	}, { /* pte */
		.name	= "PTE",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
@@ -282,7 +285,8 @@ static void note_page(struct pg_state *st, unsigned long addr,
			delta >>= 10;
			unit++;
		}
		pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
		pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
				   pg_level[st->level].name);
		if (st->current_domain)
			pt_dump_seq_printf(st->seq, " %s",
					   st->current_domain);

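Illustration (not part of the commit): with the .name strings in place, note_page() can tag every dumped range with the page-table level that produced it ("PTE", or "PMD"/"PGD" depending on how many levels the configuration folds). A hedged sketch of the same idea in isolation; all names here are invented.

#include <linux/seq_file.h>

static const char *const demo_level_names[] = {
	"PGD", "P4D", "PUD", "PMD", "PTE",
};

/* Emit "<size><unit> <LEVEL>" the way the extended format above does. */
static void demo_note_range(struct seq_file *m, unsigned long delta,
			    char unit, unsigned int level)
{
	seq_printf(m, "%9lu%c %s", delta, unit, demo_level_names[level]);
}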
@@ -268,12 +268,17 @@ void __init kasan_init(void)

	/*
	 * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
	 *    so we need to map this area.
	 *    so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
	 *    VMALLOC support KASAN will manage this region dynamically,
	 *    refer to kasan_populate_vmalloc() and ARM's implementation of
	 *    module_alloc().
	 * 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
	 *    ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
	 *    use kasan_populate_zero_shadow.
	 */
	create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
		create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
	create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly, so
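Illustration (not part of the commit): the module area's KASAN shadow only has to be populated statically at boot when it cannot be created on demand; with CONFIG_KASAN_VMALLOC the shadow for module allocations is built lazily via kasan_populate_vmalloc(). A hedged restatement of the guard above; demo_need_static_module_shadow() is an invented helper.

#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool demo_need_static_module_shadow(void)
{
	/*
	 * Only map MODULES_VADDR..MODULES_END up front when modules are
	 * enabled and KASAN cannot populate their shadow lazily.
	 */
	return IS_ENABLED(CONFIG_MODULES) &&
	       !IS_ENABLED(CONFIG_KASAN_VMALLOC);
}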