Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Fix confusion with implicitly-shifted MDCR_EL2 masks breaking
     SPE/TRBE initialization

   - Align nested page table walker with the intended memory attribute
     combining rules of the architecture

   - Prevent userspace from constraining the advertised ASID width,
     avoiding horrors of guest TLBIs not matching the intended context
     in hardware

   - Don't leak references on LPIs when insertion into the translation
     cache fails

  RISC-V:

   - Replace csr_write() with csr_set() for HVIEN PMU overflow bit

  x86:

   - Cache CPUID.0xD XSTATE offsets+sizes during module init

     On Intel's Emerald Rapids CPUID costs hundreds of cycles and there
     are a lot of leaves under 0xD. Getting rid of the CPUIDs during
     nested VM-Enter and VM-Exit is planned for the next release, for
     now just cache them: even on Skylake that is 40% faster"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Cache CPUID.0xD XSTATE offsets+sizes during module init
  RISC-V: KVM: Fix csr_write -> csr_set for HVIEN PMU overflow bit
  KVM: arm64: vgic-its: Add error handling in vgic_its_cache_translation
  KVM: arm64: Do not allow ID_AA64MMFR0_EL1.ASIDbits to be overridden
  KVM: arm64: Fix S1/S2 combination when FWB==1 and S2 has Device memory type
  arm64: Fix usage of new shifted MDCR_EL2 values
commit 81576a9a27
@@ -87,7 +87,7 @@
           1 << PMSCR_EL2_PA_SHIFT)
     msr_s   SYS_PMSCR_EL2, x0               // addresses and physical counter
 .Lskip_spe_el2_\@:
-    mov     x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+    mov     x0, #MDCR_EL2_E2PB_MASK
     orr     x2, x2, x0                      // If we don't have VHE, then
                                             // use EL1&0 translation.
 
@@ -100,7 +100,7 @@
     and     x0, x0, TRBIDR_EL1_P
     cbnz    x0, .Lskip_trace_\@             // If TRBE is available at EL2
 
-    mov     x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+    mov     x0, #MDCR_EL2_E2TB_MASK
     orr     x2, x2, x0                      // allow the EL1&0 translation
                                             // to own it.
 
@@ -114,8 +114,8 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 
     // Use EL2 translations for SPE & TRBE and disable access from EL1
     mrs     x0, mdcr_el2
-    bic     x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
-    bic     x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+    bic     x0, x0, #MDCR_EL2_E2PB_MASK
+    bic     x0, x0, #MDCR_EL2_E2TB_MASK
     msr     mdcr_el2, x0
 
     // Transfer the MM state from EL1 to EL2
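The assembly hunks above, and the pvm_init_traps_aa64dfr0() hunks further down, all drop an explicit << MDCR_EL2_E2PB_SHIFT / << MDCR_EL2_E2TB_SHIFT. As the patch title "arm64: Fix usage of new shifted MDCR_EL2 values" suggests, the mask macros are now defined already shifted into position, so shifting them a second time manipulates the wrong bits. Below is a minimal standalone C sketch of that failure mode, using made-up FIELD_* names rather than the real MDCR_EL2 definitions:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical 2-bit field at bits [13:12]: one mask in the old
 * "unshifted value plus separate shift" style, one already shifted.
 */
#define FIELD_SHIFT           12
#define FIELD_MASK_UNSHIFTED  0x3ULL
#define FIELD_MASK_SHIFTED    (0x3ULL << FIELD_SHIFT)

int main(void)
{
	uint64_t reg = ~0ULL;

	/* Old idiom: shift the unshifted mask into place, then clear. */
	uint64_t ok = reg & ~(FIELD_MASK_UNSHIFTED << FIELD_SHIFT);

	/*
	 * Applying the same shift to an already-shifted mask clears
	 * bits [25:24] instead and leaves the real field untouched.
	 */
	uint64_t bad = reg & ~(FIELD_MASK_SHIFTED << FIELD_SHIFT);

	printf("correct:        %#llx\n", (unsigned long long)ok);
	printf("double-shifted: %#llx\n", (unsigned long long)bad);
	return 0;
}

Run, the double-shifted version clears bits [25:24] instead of [13:12], the kind of silent mis-programming that broke SPE/TRBE setup here.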
@@ -739,8 +739,15 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
             final_attr = s1_parattr;
             break;
         default:
-            /* MemAttr[2]=0, Device from S2 */
-            final_attr = s2_memattr & GENMASK(1,0) << 2;
+            /*
+             * MemAttr[2]=0, Device from S2.
+             *
+             * FWB does not influence the way that stage 1
+             * memory types and attributes are combined
+             * with stage 2 Device type and attributes.
+             */
+            final_attr = min(s2_memattr_to_attr(s2_memattr),
+                             s1_parattr);
         }
     } else {
         /* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
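In the Device-from-stage-2 case the fix combines the two stages with min(), so the numerically smaller (more restrictive) attribute encoding wins. A toy C sketch of that combining rule follows; the enum values are illustrative stand-ins, not the architectural PAR attribute encodings:

#include <stdio.h>

/*
 * Illustrative encodings only: assume a smaller value means a more
 * restrictive Device attribute, which is what taking min() relies on.
 */
enum toy_attr {
	TOY_DEVICE_nGnRnE = 0x0,
	TOY_DEVICE_nGnRE  = 0x4,
	TOY_DEVICE_nGRE   = 0x8,
	TOY_DEVICE_GRE    = 0xc,
};

static unsigned int combine_device_attr(unsigned int s1_attr, unsigned int s2_attr)
{
	/* The more restrictive (numerically smaller) attribute wins. */
	return s1_attr < s2_attr ? s1_attr : s2_attr;
}

int main(void)
{
	/* Stage 1 says Device-GRE, stage 2 says Device-nGnRE: nGnRE wins. */
	printf("%#x\n", combine_device_attr(TOY_DEVICE_GRE, TOY_DEVICE_nGnRE));
	return 0;
}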
@@ -126,7 +126,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
     /* Trap SPE */
     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
         mdcr_set |= MDCR_EL2_TPMS;
-        mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+        mdcr_clear |= MDCR_EL2_E2PB_MASK;
     }
 
     /* Trap Trace Filter */
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
 
     /* Trap External Trace */
     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-        mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+        mdcr_clear |= MDCR_EL2_E2TB_MASK;
 
     vcpu->arch.mdcr_el2 |= mdcr_set;
     vcpu->arch.mdcr_el2 &= ~mdcr_clear;
@@ -2618,7 +2618,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
     ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
                     ID_AA64MMFR0_EL1_TGRAN4_2 |
                     ID_AA64MMFR0_EL1_TGRAN64_2 |
-                    ID_AA64MMFR0_EL1_TGRAN16_2)),
+                    ID_AA64MMFR0_EL1_TGRAN16_2 |
+                    ID_AA64MMFR0_EL1_ASIDBITS)),
     ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
                     ID_AA64MMFR1_EL1_HCX |
                     ID_AA64MMFR1_EL1_TWED |
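Removing ID_AA64MMFR0_EL1_ASIDBITS from the writable mask means userspace can no longer override the advertised ASID width, so the guest always sees what the hardware reports. A rough standalone sketch of how a writable mask limits such an override; the field layout and the apply_override() helper are hypothetical, not KVM's actual code:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: an ID register override is limited to a writable
 * bitmask, and fields outside that mask keep the hardware value. The
 * field positions below are made up for the example.
 */
#define TOY_FIELD_A 0x00000000000000f0ULL	/* still userspace-writable */
#define TOY_FIELD_B 0x000000000000000fULL	/* dropped from the writable mask */

static uint64_t apply_override(uint64_t hw, uint64_t user, uint64_t writable)
{
	return (user & writable) | (hw & ~writable);
}

int main(void)
{
	uint64_t hw = 0x32, user = 0x11;

	/*
	 * With TOY_FIELD_B no longer writable, the low nibble stays at the
	 * hardware value (2) no matter what userspace asked for.
	 */
	printf("%#llx\n",
	       (unsigned long long)apply_override(hw, user, TOY_FIELD_A));
	return 0;
}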
@@ -608,12 +608,22 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
     lockdep_assert_held(&its->its_lock);
     vgic_get_irq_kref(irq);
 
+    old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+
+    /*
+     * Put the reference taken on @irq if the store fails. Intentionally do
+     * not return the error as the translation cache is best effort.
+     */
+    if (xa_is_err(old)) {
+        vgic_put_irq(kvm, irq);
+        return;
+    }
+
     /*
      * We could have raced with another CPU caching the same
      * translation behind our back, ensure we don't leak a
      * reference if that is the case.
      */
-    old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
     if (old)
         vgic_put_irq(kvm, old);
 }
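The new early-exit path drops the reference taken on the IRQ when xa_store() fails, and the existing tail still drops the reference of any entry the store displaced. A toy userspace model of that reference-handling pattern follows; the one-slot cache, cache_store() and the error marker are inventions of the example, standing in loosely for the xarray:

#include <stdio.h>

/* Toy refcounted object standing in for a vgic_irq. */
struct obj {
	int refs;
	const char *name;
};

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		printf("last reference to %s dropped\n", o->name);
}

#define STORE_ERR ((struct obj *)-1)	/* plays the role of xa_is_err() */

/* One-slot cache standing in (very loosely) for the xarray. */
static struct obj *slot;

static struct obj *cache_store(struct obj *o, int fail)
{
	struct obj *old = slot;

	if (fail)
		return STORE_ERR;	/* simulated allocation failure */
	slot = o;
	return old;
}

static void cache_translation(struct obj *o, int fail)
{
	struct obj *old;

	obj_get(o);			/* reference now held by the cache */
	old = cache_store(o, fail);
	if (old == STORE_ERR) {
		obj_put(o);		/* store failed: put our reference */
		return;
	}
	if (old)
		obj_put(old);		/* displaced entry: put its reference */
}

int main(void)
{
	struct obj a = { .refs = 1, .name = "a" };
	struct obj b = { .refs = 1, .name = "b" };

	cache_translation(&a, 0);	/* cached: a now holds two references */
	cache_translation(&b, 1);	/* failed insert must not leak b's ref */
	cache_translation(&b, 0);	/* displaces a, drops a's cache ref */
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);
	return 0;
}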
@@ -590,7 +590,7 @@ void kvm_riscv_aia_enable(void)
     csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
     /* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
     if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
-        csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
+        csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
 }
 
 void kvm_riscv_aia_disable(void)
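csr_write() replaces the whole CSR while csr_set() only ORs the requested bits in, so writing HVIEN directly could clobber bits that were already enabled. A small userspace model of that distinction; the register variable and bit numbers are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

/*
 * Userspace model of the two accessors: csr_write() replaces the whole
 * register, csr_set() only ORs bits in.
 */
static uint64_t hvien_model;

static void csr_write_model(uint64_t val) { hvien_model = val; }
static void csr_set_model(uint64_t val)   { hvien_model |= val; }

int main(void)
{
	/* Pretend some interrupt bit was already enabled earlier. */
	hvien_model = BIT(5);
	csr_write_model(BIT(13));	/* clobbers the existing bit */
	printf("after write: %#llx\n", (unsigned long long)hvien_model);

	hvien_model = BIT(5);
	csr_set_model(BIT(13));		/* preserves the existing bit */
	printf("after set:   %#llx\n", (unsigned long long)hvien_model);
	return 0;
}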
@@ -36,6 +36,26 @@
 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_cpu_caps);
 
+struct cpuid_xstate_sizes {
+    u32 eax;
+    u32 ebx;
+    u32 ecx;
+};
+
+static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;
+
+void __init kvm_init_xstate_sizes(void)
+{
+    u32 ign;
+    int i;
+
+    for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
+        struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
+
+        cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
+    }
+}
+
 u32 xstate_required_size(u64 xstate_bv, bool compacted)
 {
     int feature_bit = 0;
@@ -44,14 +64,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
     xstate_bv &= XFEATURE_MASK_EXTEND;
     while (xstate_bv) {
         if (xstate_bv & 0x1) {
-            u32 eax, ebx, ecx, edx, offset;
-            cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
+            struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
+            u32 offset;
+
             /* ECX[1]: 64B alignment in compacted form */
             if (compacted)
-                offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
+                offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
             else
-                offset = ebx;
-            ret = max(ret, offset + eax);
+                offset = xs->ebx;
+            ret = max(ret, offset + xs->eax);
         }
 
         xstate_bv >>= 1;
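The kernel change reads every CPUID.0xD subleaf once at module init and lets xstate_required_size() consume the cached values instead of issuing CPUID on every call. A userspace sketch of the same caching idea using GCC's __get_cpuid_count(); XSTATE_MAX_LEAVES and the starting subleaf of 2 are assumptions of the example, not KVM's constants:

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Cache the CPUID.0xD subleaves once and reuse them later, instead of
 * re-executing CPUID on every query.
 */
#define XSTATE_MAX_LEAVES 19

struct xstate_leaf {
	uint32_t size;		/* EAX: size of the state component */
	uint32_t offset;	/* EBX: offset in the non-compacted format */
	uint32_t flags;		/* ECX: alignment/placement flags */
};

static struct xstate_leaf xstate_cache[XSTATE_MAX_LEAVES];

static void init_xstate_cache(void)
{
	for (unsigned int i = 2; i < XSTATE_MAX_LEAVES; i++) {
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid_count(0xD, i, &eax, &ebx, &ecx, &edx))
			continue;
		xstate_cache[i] = (struct xstate_leaf){ eax, ebx, ecx };
	}
}

int main(void)
{
	init_xstate_cache();
	for (unsigned int i = 2; i < XSTATE_MAX_LEAVES; i++)
		if (xstate_cache[i].size)
			printf("leaf 0xD.%u: size=%u offset=%u flags=%#x\n",
			       i, xstate_cache[i].size, xstate_cache[i].offset,
			       xstate_cache[i].flags);
	return 0;
}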
@@ -31,6 +31,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
            u32 *ecx, u32 *edx, bool exact_only);
 
+void __init kvm_init_xstate_sizes(void);
 u32 xstate_required_size(u64 xstate_bv, bool compacted);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
@@ -13997,6 +13997,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
 
 static int __init kvm_x86_init(void)
 {
+    kvm_init_xstate_sizes();
+
     kvm_mmu_x86_module_init();
     mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
     return 0;