mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
KVM: x86: Always use 32-bit SMRAM save state for 32-bit kernels
Invoking the 64-bit variation on a 32-bit kernel will crash the guest, trigger a WARN, and/or lead to a buffer overrun in the host, e.g. rsm_load_state_64() writes r8-r15 unconditionally, but enum kvm_reg and thus x86_emulate_ctxt._regs only define r8-r15 for CONFIG_X86_64.

KVM allows userspace to report long mode support via CPUID, even though the guest is all but guaranteed to crash if it actually tries to enable long mode. But, a pure 32-bit guest that is ignorant of long mode will happily plod along.

SMM complicates things as 64-bit CPUs use a different SMRAM save state area. KVM handles this correctly for 64-bit kernels, e.g. uses the legacy save state map if userspace has hidden long mode from the guest, but doesn't fare well when userspace reports long mode support on a 32-bit host kernel (32-bit KVM doesn't support 64-bit guests).

Since the alternative is to crash the guest, e.g. by not loading state or explicitly requesting shutdown, unconditionally use the legacy SMRAM save state map for 32-bit KVM. If a guest has managed to get far enough to handle SMIs when running under a weird/buggy userspace hypervisor, then don't deliberately crash the guest, since there are no downsides (from KVM's perspective) to allowing it to continue running.

Fixes: 660a5d517aaab ("KVM: x86: save/load state on SMM switch")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 8f4dc2e77c
commit b68f3cc7d9
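
To make the fix concrete, below is a minimal, self-contained sketch (not the kernel code itself) of the compile-time dispatch the patch establishes. The function names emulator_has_longmode(), rsm_load_state_64() and rsm_load_state_32() mirror the patch; the stub bodies and the main() harness are illustrative only. Build with -DCONFIG_X86_64 to model a 64-bit kernel, or without it to model a 32-bit kernel, where the 64-bit SMRAM path is compiled out entirely and the legacy save state map is always used.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the CPUID 0x80000001 EDX.LM check; on a 32-bit build
 * the patch makes this unconditionally return false. */
static bool emulator_has_longmode(void)
{
#ifdef CONFIG_X86_64
	return true;
#else
	return false;
#endif
}

#ifdef CONFIG_X86_64
/* Only exists on 64-bit builds; the real function restores r8-r15. */
static int rsm_load_state_64(void)
{
	puts("RSM: 64-bit SMRAM save state map");
	return 0;
}
#endif

static int rsm_load_state_32(void)
{
	puts("RSM: legacy 32-bit SMRAM save state map");
	return 0;
}

int main(void)
{
	int ret;

	printf("long mode reported: %d\n", emulator_has_longmode());

	/* Mirrors the guarded dispatch the patch adds to em_rsm(). */
#ifdef CONFIG_X86_64
	if (emulator_has_longmode())
		ret = rsm_load_state_64();
	else
#endif
		ret = rsm_load_state_32();

	return ret;
}

On a build without CONFIG_X86_64 the compiler never even sees rsm_load_state_64(), so nothing can reach code that would touch r8-r15, which is why the legacy map is safe to use unconditionally there.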
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
 	u32 eax, ebx, ecx, edx;
 
 	eax = 0x80000001;
 	ecx = 0;
 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
 	return edx & bit(X86_FEATURE_LM);
+#else
+	return false;
+#endif
 }
 
 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
@@ -2372,6 +2376,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
 	return X86EMUL_CONTINUE;
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
 			   int n)
 {
@@ -2391,6 +2396,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 				    u64 cr0, u64 cr3, u64 cr4)
@@ -2492,6 +2498,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 			     const char *smstate)
 {
@@ -2554,6 +2561,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
@@ -2621,9 +2629,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->pre_leave_smm(ctxt, buf))
 		return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
 	if (emulator_has_longmode(ctxt))
 		ret = rsm_load_state_64(ctxt, buf);
 	else
+#endif
 		ret = rsm_load_state_32(ctxt, buf);
 
 	if (ret != X86EMUL_CONTINUE) {
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
 	struct desc_ptr dt;
 	struct kvm_segment seg;
 	unsigned long val;
@@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
-#else
-	WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
+#endif
 		enter_smm_save_state_32(vcpu, buf);
 
 	/*
@@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
 	kvm_update_cpuid(vcpu);
 	kvm_mmu_reset_context(vcpu);