arm64: KVM: Allow mapping of vectors outside of the RAM region
We're now ready to map our vectors in weird and wonderful locations.
On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated if
this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR and
gets mapped outside of the normal RAM region, next to the idmap.

That way, being able to obtain VBAR_EL2 doesn't reveal the mapping of
the rest of the hypervisor code.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
commit dee39247dc
parent 4205a89b80
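Before the diff itself, a minimal user-space sketch of the vector selection this patch teaches kvm_get_hyp_vector() to perform may help when reading the kvm_mmu.h hunk further down. This is not kernel code: the base addresses and slot numbers are made-up placeholders, and only the slot/base decision flow mirrors the patch.

/*
 * Stand-alone model of the post-patch kvm_get_hyp_vector() decision:
 * which vector base and which 2kB slot a CPU ends up using, depending
 * on the ARM64_HARDEN_BRANCH_PREDICTOR (HBP) and
 * ARM64_HARDEN_EL2_VECTORS (HEL2) capabilities.
 * All addresses below are illustrative placeholders.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_2K	0x800UL

int main(void)
{
	const uintptr_t hyp_vectors  = 0x100000; /* stands in for kern_hyp_va(__kvm_hyp_vector) */
	const uintptr_t bp_vecs_hyp  = 0x200000; /* stands in for kern_hyp_va(__bp_harden_hyp_vecs_start) */
	const uintptr_t bp_vect_base = 0x300000; /* stands in for __kvm_bp_vect_base (mapping next to the idmap) */
	const int bp_hardening_slot  = 1;        /* placeholder for data->hyp_vectors_slot */
	const int harden_el2_slot    = 3;        /* placeholder for __kvm_harden_el2_vector_slot */

	for (int hbp = 0; hbp <= 1; hbp++) {
		for (int hel2 = 0; hel2 <= 1; hel2++) {
			uintptr_t vect = hyp_vectors;
			int slot = -1;

			if (hbp) {		/* BP hardening sequence lives in a vector slot */
				vect = bp_vecs_hyp;
				slot = bp_hardening_slot;
			}
			if (hel2) {		/* !VHE only: switch to the out-of-RAM mapping */
				vect = bp_vect_base;
				if (slot == -1)
					slot = harden_el2_slot;
			}
			if (slot != -1)
				vect += slot * SZ_2K;

			printf("HBP=%d HEL2=%d -> slot=%2d vectors=%#lx\n",
			       hbp, hel2, slot, (unsigned long)vect);
		}
	}
	return 0;
}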
@@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, the
 hypervisor maps kernel pages in EL2 at a fixed (and potentially
 random) offset from the linear mapping. See the kern_hyp_va macro and
 kvm_update_va_mask function for more details. MMIO devices such as
-GICv2 gets mapped next to the HYP idmap page.
+GICv2 gets mapped next to the HYP idmap page, as do vectors when
+ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
@@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
+config HARDEN_EL2_VECTORS
+	bool "Harden EL2 vector mapping against system register leak" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to leak privileged information such as the vector base
+	  register, resulting in a potential defeat of the EL2 layout
+	  randomization.
+
+	  This config option will map the vectors to a fixed location,
+	  independent of the EL2 code mapping, so that revealing VBAR_EL2
+	  to an attacker does not give away any extra information. This
+	  only gets enabled on affected CPUs.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
@@ -360,31 +360,91 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+/*
+ * EL2 vectors can be mapped and rerouted in a number of ways,
+ * depending on the kernel configuration and CPU present:
+ *
+ * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
+ *   hardening sequence is placed in one of the vector slots, which is
+ *   executed before jumping to the real vectors.
+ *
+ * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
+ *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
+ *   hardening sequence is mapped next to the idmap page, and executed
+ *   before jumping to the real vectors.
+ *
+ * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ *   empty slot is selected, mapped next to the idmap page, and
+ *   executed before jumping to the real vectors.
+ *
+ * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * VHE, as we don't have hypervisor-specific mappings. If the system
+ * is VHE and yet selects this capability, it will be ignored.
+ */
 #include <asm/mmu.h>
 
+extern void *__kvm_bp_vect_base;
+extern int __kvm_harden_el2_vector_slot;
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+	int slot = -1;
 
-	if (data->fn) {
-		vect = __bp_harden_hyp_vecs_start +
-		       data->hyp_vectors_slot * SZ_2K;
-
-		if (!has_vhe())
-			vect = lm_alias(vect);
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+		slot = data->hyp_vectors_slot;
 	}
 
-	vect = kern_hyp_va(vect);
+	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
+		vect = __kvm_bp_vect_base;
+		if (slot == -1)
+			slot = __kvm_harden_el2_vector_slot;
+	}
+
+	if (slot != -1)
+		vect += slot * SZ_2K;
+
 	return vect;
 }
 
+/* This is only called on a !VHE system */
 static inline int kvm_map_vectors(void)
 {
+	/*
+	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !HBP + !HEL2 -> use direct vectors
+	 *  HBP + !HEL2 -> use hardened vectors in place
+	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
+		unsigned long size = (__bp_harden_hyp_vecs_end -
+				      __bp_harden_hyp_vecs_start);
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
 	return 0;
 }
 
 #else
 static inline void *kvm_get_hyp_vector(void)
 {
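The four combinations spelled out in the kvm_map_vectors() comment above read as a small decision table; the sketch below simply prints it. The outcome strings are descriptions only, and create_hyp_exec_mappings() and the real vector symbols are not modelled.

/*
 * Decision table from the kvm_map_vectors() comment, as a tiny C program.
 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR, HEL2 = ARM64_HARDEN_EL2_VECTORS.
 */
#include <stdio.h>

int main(void)
{
	static const char *outcome[2][2] = {
		/* [HBP][HEL2] */
		{ "direct vectors",
		  "allocate one spare slot, use exec mapping next to the idmap" },
		{ "hardened vectors in place",
		  "hardened vectors through the exec mapping next to the idmap" },
	};

	for (int hbp = 0; hbp <= 1; hbp++)
		for (int hel2 = 0; hel2 <= 1; hel2++)
			printf("HBP=%d HEL2=%d -> %s\n", hbp, hel2, outcome[hbp][hel2]);

	return 0;
}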
@@ -51,10 +51,13 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
+     defined(CONFIG_HARDEN_EL2_VECTORS))
 extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
 extern atomic_t arm64_el2_vector_last_slot;
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool KVM && HARDEN_BRANCH_PREDICTOR
+	def_bool KVM && (HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS)
 
 source drivers/vhost/Kconfig
 
@@ -151,6 +151,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 	}
 }
 
+void *__kvm_bp_vect_base;
+int __kvm_harden_el2_vector_slot;
+
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {