ARM updates for 6.13-rc1
- add dev_is_amba() function to allow conversions during the next cycle
- improve PREEMPT_RT performance with VFP
- KASAN fixes for vmap stack
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmdI+gkACgkQ9OeQG+St
rGSyrQ/+PxDrzd06IFsbICV5xrf3JJBQDdwbLzHhhMpRdg84/nxO60YyrbMqFO/H
koDI9WmNU8nN0K5WcYwpVFG23oajktHGLNSnJRcp2KEolQ83abjzh1n0UfkA3TCV
TZhPLL6FjRcwqRKX1dV2d/bZHzhQ4FS/nv85vyAhcUcSq0R6boS4Q8d3EftKEhly
nlch4vl/aMVCXgCBB7Y6/SF8E9Xzx5ZTFkqnyZZptY+3hM8ZQbJJt2ppAU0Qo2JP
gtS2FapBVozcKsWdA2pq2yU3gFQJC3rzMToms6z/LrdPhjs0RlCklVuv25HUoiY1
y0sXaoXzIPmmpkLGXwQx3qEJZM3rC1rR/i+yEFZB4/NzelN3TPYkSeI918gW+74B
S1zluO20BDL+2MsDQ+1AKPPq3iOVuNrmTiOQm8kdllChOoS41pMURjWm1pthRc4p
ScPSDqWHcmS4SrH+ftbT9ma4RskVindPtxsAf01mAbmLC92jj0WWSiFWaxydhkz1
430rxjFDKGZSBDoRbbxe9bE0NGWOfaInqrM3FwVNRXeZHdpUACGWU0Cx2AagtljY
9adFwCXdWc8r/Y9nZxBnkr+RQs01zJALcb44lqj70JYkUvoLPPNExIRwyh8vupjb
zjgQj1Cdj2bnfnv1PB/DGK63aGUpELlXkKFkKionJWcilDdmtiY=
=RJA2
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux

Pull ARM updates from Russell King:

 - add dev_is_amba() function to allow conversions during the next cycle

 - improve PREEMPT_RT performance with VFP

 - KASAN fixes for vmap stack

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux:
  ARM: 9431/1: mm: Pair atomic_set_release() with _read_acquire()
  ARM: 9430/1: entry: Do a dummy read from VMAP shadow
  ARM: 9429/1: ioremap: Sync PGDs for VMALLOC shadow
  ARM: 9426/1: vfp: Move sending signals outside of vfp_state_hold()ed section.
  ARM: 9425/1: vfp: Use vfp_state_hold() in vfp_support_entry().
  ARM: 9424/1: vfp: Use vfp_state_hold() in vfp_sync_hwstate().
  ARM: 9423/1: vfp: Provide vfp_state_hold() for VFP locking.
  ARM: 9415/1: amba: Add dev_is_amba() function and export it for modules
commit 9d5daef3b3
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -25,6 +25,7 @@
 #include <asm/tls.h>
 #include <asm/system_info.h>
 #include <asm/uaccess-asm.h>
+#include <asm/kasan_def.h>
 
 #include "entry-header.S"
 #include <asm/probes.h>
@@ -561,6 +562,13 @@ ENTRY(__switch_to)
 	@ entries covering the vmalloc region.
 	@
 	ldr	r2, [ip]
+#ifdef CONFIG_KASAN_VMALLOC
+	@ Also dummy read from the KASAN shadow memory for the new stack if we
+	@ are using KASAN
+	mov_l	r2, KASAN_SHADOW_OFFSET
+	add	r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
+	ldr	r2, [r2]
+#endif
 #endif
 
 	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
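The dummy read added above follows the generic KASAN address mapping, shadow = KASAN_SHADOW_OFFSET + (addr >> KASAN_SHADOW_SCALE_SHIFT). A minimal C sketch of what the three added instructions do — the helper name is hypothetical; the kernel performs this inline in assembly because __switch_to is switching stacks at that point:

	/* Hypothetical C equivalent: compute the KASAN shadow address
	 * for the new stack pointer and touch it, so the shadow mapping
	 * is faulted in (or confirmed present) before the switch is
	 * committed.
	 */
	static inline void kasan_probe_stack_shadow(unsigned long sp)
	{
		unsigned long shadow = KASAN_SHADOW_OFFSET +
				       (sp >> KASAN_SHADOW_SCALE_SHIFT);

		READ_ONCE(*(u8 *)shadow);	/* value deliberately discarded */
	}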
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
+#ifdef CONFIG_KASAN
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return (unsigned long)kasan_mem_to_shadow((void *)addr);
+}
+#else
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
+		       unsigned long end)
+{
+	end = ALIGN(end, PGDIR_SIZE);
+	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	int seq;
 
 	do {
-		seq = atomic_read(&init_mm.context.vmalloc_seq);
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+		seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
+		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
+		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+			unsigned long start =
+				arm_kasan_mem_to_shadow(VMALLOC_START);
+			unsigned long end =
+				arm_kasan_mem_to_shadow(VMALLOC_END);
+			memcpy_pgd(mm, start, end);
+		}
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the
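The hunk is cut off by the viewer mid-comment, but the ordering it describes is the usual acquire/release pairing on the vmalloc_seq counter. A condensed sketch of the retry loop, with the KASAN branch elided (structure as in the hunk above):

	do {
		/* Acquire: if we observe a counter value, we also observe
		 * the init_mm PGD updates published before it.
		 */
		seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
		/* Release: any CPU that sees mm's new counter value is
		 * guaranteed to see the copied PGD entries as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));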
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -55,6 +55,34 @@ extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
  */
 union vfp_state *vfp_current_hw_state[NR_CPUS];
 
+/*
+ * Claim ownership of the VFP unit.
+ *
+ * The caller may change VFP registers until vfp_state_release() is called.
+ *
+ * local_bh_disable() is used to disable preemption and to disable VFP
+ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
+ * not sufficient because it only serializes soft interrupt related sections
+ * via a local lock, but stays preemptible. Disabling preemption is the right
+ * choice here as bottom half processing is always in thread context on RT
+ * kernels so it implicitly prevents bottom half processing as well.
+ */
+static void vfp_state_hold(void)
+{
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_disable();
+	else
+		preempt_disable();
+}
+
+static void vfp_state_release(void)
+{
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_enable();
+	else
+		preempt_enable();
+}
+
 /*
  * Is 'thread's most up to date state stored in this CPUs hardware?
  * Must be called from non-preemptible context.
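The pair gives callers a single primitive with the right semantics on both preemption models: local_bh_disable()/local_bh_enable() on non-RT kernels, preempt_disable()/preempt_enable() on PREEMPT_RT. Usage sketch (the actual conversions appear in the hunks below):

	vfp_state_hold();
	/* ... inspect or modify FPEXC, FPSCR and the banked VFP registers ... */
	vfp_state_release();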
@@ -240,7 +268,7 @@ static void vfp_panic(char *reason, u32 inst)
 /*
  * Process bitmask of exception conditions.
  */
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
 {
 	int si_code = 0;
 
@@ -248,8 +276,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 
 	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce", inst);
-		vfp_raise_sigfpe(FPE_FLTINV, regs);
-		return;
+		return FPE_FLTINV;
 	}
 
 	/*
@@ -277,8 +304,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
 	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
 
-	if (si_code)
-		vfp_raise_sigfpe(si_code, regs);
+	return si_code;
 }
 
 /*
@@ -324,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
 static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
 	u32 fpscr, orig_fpscr, fpsid, exceptions;
+	int si_code2 = 0;
+	int si_code = 0;
 
 	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@@ -369,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 		 * unallocated VFP instruction but with FPSCR.IXE set and not
 		 * on VFP subarch 1.
 		 */
-		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
-		return;
+		si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
+		goto exit;
 	}
 
 	/*
@@ -394,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 */
 	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
 	if (exceptions)
-		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+		si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
 
 	/*
 	 * If there isn't a second FP instruction, exit now. Note that
 	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
 	 */
 	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
-		return;
+		goto exit;
 
 	/*
 	 * The barrier() here prevents fpinst2 being read
@@ -413,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 emulate:
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
-		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+		si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+exit:
+	vfp_state_release();
+	if (si_code2)
+		vfp_raise_sigfpe(si_code2, regs);
+	if (si_code)
+		vfp_raise_sigfpe(si_code, regs);
 }
 
 static void vfp_enable(void *unused)
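The si_code/si_code2 plumbing above is the heart of the "move sending signals outside of vfp_state_hold()ed section" change. A condensed before/after sketch of the pattern (details elided from the hunks above):

	/* Before: vfp_raise_exceptions() delivered SIGFPE itself, while
	 * execution was still inside the non-preemptible VFP section.
	 */
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/* After: it returns an si_code; delivery happens only once
	 * vfp_state_release() has run, since raising a signal can take
	 * sleeping locks on PREEMPT_RT.
	 */
	if (exceptions)
		si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
	vfp_state_release();
	if (si_code)
		vfp_raise_sigfpe(si_code, regs);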
@@ -512,11 +546,9 @@ static inline void vfp_pm_init(void) { }
  */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
-	unsigned int cpu = get_cpu();
+	vfp_state_hold();
 
-	local_bh_disable();
-
-	if (vfp_state_in_hw(cpu, thread)) {
+	if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
 		u32 fpexc = fmrx(FPEXC);
 
 		/*
@@ -527,8 +559,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
 		fmxr(FPEXC, fpexc);
 	}
 
-	local_bh_enable();
-	put_cpu();
+	vfp_state_release();
 }
 
 /* Ensure that the thread reloads the hardware VFP state on the next use. */
@@ -683,7 +714,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 	if (!user_mode(regs))
 		return vfp_kmode_exception(regs, trigger);
 
-	local_bh_disable();
+	vfp_state_hold();
 	fpexc = fmrx(FPEXC);
 
 	/*
@@ -748,6 +779,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 		 * replay the instruction that trapped.
 		 */
 		fmxr(FPEXC, fpexc);
+		vfp_state_release();
 	} else {
 		/* Check for synchronous or asynchronous exceptions */
 		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
@@ -762,17 +794,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 			if (!(fpscr & FPSCR_IXE)) {
 				if (!(fpscr & FPSCR_LENGTH_MASK)) {
 					pr_debug("not VFP\n");
-					local_bh_enable();
+					vfp_state_release();
 					return -ENOEXEC;
 				}
 				fpexc |= FPEXC_DEX;
 			}
 		}
 bounce:		regs->ARM_pc += 4;
+		/* VFP_bounce() will invoke vfp_state_release() */
 		VFP_bounce(trigger, fpexc, regs);
 	}
 
-	local_bh_enable();
 	return 0;
 }
 
@@ -837,7 +869,7 @@ void kernel_neon_begin(void)
 	unsigned int cpu;
 	u32 fpexc;
 
-	local_bh_disable();
+	vfp_state_hold();
 
 	/*
 	 * Kernel mode NEON is only allowed outside of hardirq context with
@@ -868,7 +900,7 @@ void kernel_neon_end(void)
 {
 	/* Disable the NEON/VFP unit. */
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-	local_bh_enable();
+	vfp_state_release();
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -449,6 +449,12 @@ const struct bus_type amba_bustype = {
 };
 EXPORT_SYMBOL_GPL(amba_bustype);
 
+bool dev_is_amba(const struct device *dev)
+{
+	return dev->bus == &amba_bustype;
+}
+EXPORT_SYMBOL_GPL(dev_is_amba);
+
 static int __init amba_init(void)
 {
 	return bus_register(&amba_bustype);

--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -121,6 +121,7 @@ extern const struct bus_type amba_bustype;
 #ifdef CONFIG_ARM_AMBA
 int __amba_driver_register(struct amba_driver *, struct module *);
 void amba_driver_unregister(struct amba_driver *);
+bool dev_is_amba(const struct device *dev);
 #else
 static inline int __amba_driver_register(struct amba_driver *drv,
 					 struct module *owner)
@@ -130,6 +131,10 @@ static inline int __amba_driver_register(struct amba_driver *drv,
 static inline void amba_driver_unregister(struct amba_driver *drv)
 {
 }
+static inline bool dev_is_amba(const struct device *dev)
+{
+	return false;
+}
 #endif
 
 struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
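A hedged sketch of the intended conversion target — the driver code below is illustrative, not part of this series. The new helper mirrors dev_is_pci()/dev_is_platform(): callers can identify an AMBA device behind a generic struct device without touching amba_bustype directly, then convert with to_amba_device():

	#include <linux/amba/bus.h>
	#include <linux/device.h>

	/* Hypothetical helper: report the AMBA peripheral ID when the
	 * device turns out to sit on the AMBA bus.
	 */
	static void example_report_periphid(struct device *dev)
	{
		if (dev_is_amba(dev)) {
			struct amba_device *adev = to_amba_device(dev);

			pr_info("%s: AMBA periphid 0x%08x\n",
				dev_name(dev), adev->periphid);
		}
	}

Since dev_is_amba() is exported with EXPORT_SYMBOL_GPL, this also works from modules, which is what the commit message means by "allow conversions during the next cycle".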