/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_KUP_H_
#define _ASM_POWERPC_KUP_H_

#ifdef CONFIG_PPC64
#include <asm/book3s/64/kup-radix.h>
#endif
#ifdef CONFIG_PPC_8xx
#include <asm/nohash/32/kup-8xx.h>
#endif
#ifdef CONFIG_PPC_BOOK3S_32
#include <asm/book3s/32/kup.h>
#endif

#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP
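/*
 * With KUAP disabled the kuap_* helpers expand to empty macros, so the
 * assembly entry/exit code can invoke them unconditionally.
 */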
.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
.endm

.macro kuap_restore sp, current, gpr1, gpr2, gpr3
.endm

.macro kuap_check current, gpr
.endm

#endif

#else /* !__ASSEMBLY__ */

#include <asm/pgtable.h>
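/*
 * Called early in boot to initialise the kernel userspace protections
 * (KUEP and KUAP) configured below.
 */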
void setup_kup(void);
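/* KUEP: Kernel Userspace Execution Prevention */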
#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled);
#else
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
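/* KUAP: Kernel Userspace Access Prevention */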
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
static inline void allow_user_access(void __user *to, const void __user *from,
				     unsigned long size) { }
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size) { }
/*
 * When KUAP is enabled, the fault handler uses bad_kuap_fault() to detect
 * faults that occur *within* a valid user access region yet are still
 * blocked by the AMR, e.g. because the AMR was incorrectly saved/restored
 * or otherwise overwritten. Such a fault cannot be fixed up, so without
 * this check the kernel would retake it forever, leading to soft lockups.
 * The KUAP implementations therefore WARN() and fail the access rather
 * than BUG(): failing an access that should have been allowed poses no
 * data-integrity risk. Without CONFIG_PPC_KUAP there is nothing to detect:
 */
static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
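/*
 * Illustrative sketch (not verbatim kernel code) of how the page fault
 * path is expected to consume this hook; the real logic lives in
 * bad_kernel_fault() in arch/powerpc/mm/fault.c:
 *
 *	if (bad_kuap_fault(regs, is_write))
 *		return true;	// unfixable blocked access, fail the fault
 */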
#endif /* CONFIG_PPC_KUAP */
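/*
 * Convenience wrappers that open only the direction of access the caller
 * needs, by passing NULL for the unused side.
 */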
static inline void allow_read_from_user(const void __user *from, unsigned long size)
{
allow_user_access(NULL, from, size);
}

static inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size);
}

static inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
prevent_user_access(NULL, from, size);
}

static inline void prevent_write_to_user(void __user *to, unsigned long size)
{
prevent_user_access(to, NULL, size);
}
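/*
 * Typical usage brackets a user copy; a sketch of the shape used by the
 * powerpc uaccess routines (assumed, not verbatim kernel code):
 *
 *	allow_write_to_user(to, n);
 *	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
 *	prevent_write_to_user(to, n);
 */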
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_KUP_H_ */