Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Bugfixes, and a few cleanups to the newly-introduced assembly language
  vmentry code for AMD"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Handle non-present PTEs in page fault functions
  kvm: Disable objtool frame pointer checking for vmenter.S
  MAINTAINERS: add a reviewer for KVM/s390
  KVM: s390: Fix PV check in deliverable_irqs()
  kvm: Handle reads of SandyBridge RAPL PMU MSRs rather than injecting #GP
  KVM: Remove CREATE_IRQCHIP/SET_PIT2 race
  KVM: SVM: Fix __svm_vcpu_run declaration.
  KVM: SVM: Do not setup frame pointer in __svm_vcpu_run
  KVM: SVM: Fix build error due to missing release_pages() include
  KVM: SVM: Do not mark svm_vcpu_run with STACK_FRAME_NON_STANDARD
  kvm: nVMX: match comment with return type for nested_vmx_exit_reflected
  kvm: nVMX: reflect MTF VM-exits if injected by L1
  KVM: s390: Return last valid slot if approx index is out-of-bounds
  KVM: Check validity of resolved slot when searching memslots
  KVM: VMX: Enable machine check support for 32bit targets
  KVM: SVM: move more vmentry code to assembly
  KVM: SVM: fix compilation with modular PSP and non-modular KVM
commit 8160a563cf
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -9329,6 +9329,7 @@ M:	Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Janosch Frank <frankja@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 R:	Cornelia Huck <cohuck@redhat.com>
+R:	Claudio Imbrenda <imbrenda@linux.ibm.com>
 L:	kvm@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -604,18 +604,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = __pte(0);
+	if (ptep)
+		pte = *ptep;
+	local_irq_enable();
 	/*
 	 * If the PTE disappeared temporarily due to a THP
 	 * collapse, just return and let the guest try again.
 	 */
-	if (!ptep) {
-		local_irq_enable();
+	if (!pte_present(pte)) {
 		if (page)
 			put_page(page);
 		return RESUME_GUEST;
 	}
-	pte = *ptep;
-	local_irq_enable();
 	hpa = pte_pfn(pte) << PAGE_SHIFT;
 	pte_size = PAGE_SIZE;
 	if (shift)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -815,18 +815,19 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = __pte(0);
+	if (ptep)
+		pte = *ptep;
+	local_irq_enable();
 	/*
 	 * If the PTE disappeared temporarily due to a THP
 	 * collapse, just return and let the guest try again.
 	 */
-	if (!ptep) {
-		local_irq_enable();
+	if (!pte_present(pte)) {
 		if (page)
 			put_page(page);
 		return RESUME_GUEST;
 	}
-	pte = *ptep;
-	local_irq_enable();
 
 	/* If we're logging dirty pages, always map single pages */
 	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
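Both hunks apply the same fix: the PTE is copied while interrupts are disabled, and the presence test runs on the snapshot rather than on a pointer that a concurrent THP collapse may have invalidated. A minimal sketch of the pattern, using a hypothetical helper name rather than the kernel functions above:

	/* Sketch only: snapshot a PTE so the caller never dereferences
	 * ptep after interrupts are re-enabled. */
	pte_t snapshot_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
				 unsigned int *shift)
	{
		pte_t pte = __pte(0);	/* reads as "not present" on failure */
		pte_t *ptep;

		local_irq_disable();	/* holds off a concurrent THP collapse */
		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, shift);
		if (ptep)
			pte = *ptep;	/* copy while the entry cannot change */
		local_irq_enable();

		/* Callers test pte_present(pte) on the copy; the live entry
		 * may vanish afterwards, the snapshot stays coherent. */
		return pte;
	}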
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
@@ -393,7 +393,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
 	/* PV guest cpus can have a single interruption injected at a time. */
-	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
+	if (kvm_s390_pv_cpu_get_handle(vcpu) &&
 	    vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
 		active_mask &= ~(IRQ_PEND_EXT_II_MASK |
 				 IRQ_PEND_IO_MASK |
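The swap matters for locking, not semantics: both helpers report whether the vcpu is protected, but the is_protected variant asserts that vcpu->mutex is held, and deliverable_irqs() can run without that mutex. A paraphrased sketch of the two helpers (an assumption about arch/s390/kvm/kvm-s390.h of this era, not a verbatim quote):

	static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.pv.handle;	/* plain read, no lock required */
	}

	static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
	{
		lockdep_assert_held(&vcpu->mutex);	/* why it trips here */
		return !!kvm_s390_pv_cpu_get_handle(vcpu);
	}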
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
@@ -1939,6 +1939,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
 			start = slot + 1;
 	}
 
+	if (start >= slots->used_slots)
+		return slots->used_slots - 1;
+
 	if (gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
 		atomic_set(&slots->lru_slot, start);
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
@@ -237,27 +237,6 @@ enum ssb_mitigation {
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
-/*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ALL *might* it be avoided.
- */
-static inline void vmexit_fill_RSB(void)
-{
-#ifdef CONFIG_RETPOLINE
-	unsigned long loops;
-
-	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-		      ALTERNATIVE("jmp 910f",
-				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-				  X86_FEATURE_RETPOLINE)
-		      "910:"
-		      : "=r" (loops), ASM_CALL_CONSTRAINT
-		      : : "memory" );
-#endif
-}
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
@@ -3,6 +3,10 @@
 ccflags-y += -Iarch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
+ifeq ($(CONFIG_FRAME_POINTER),y)
+OBJECT_FILES_NON_STANDARD_vmenter.o := y
+endif
+
 KVM := ../../../virt/kvm
 
 kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/highmem.h>
 #include <linux/psp-sev.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 
 #include "x86.h"
@@ -1117,7 +1118,7 @@ int __init sev_hardware_setup(void)
 	/* Maximum number of encrypted guests supported simultaneously */
 	max_sev_asid = cpuid_ecx(0x8000001F);
 
-	if (!max_sev_asid)
+	if (!svm_sev_enabled())
 		return 1;
 
 	/* Minimum ASID value that should be used for SEV guest */
@@ -1156,6 +1157,9 @@ int __init sev_hardware_setup(void)
 
 void sev_hardware_teardown(void)
 {
+	if (!svm_sev_enabled())
+		return;
+
 	bitmap_free(sev_asid_bitmap);
 	bitmap_free(sev_reclaim_asid_bitmap);
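The point of switching the guard from !max_sev_asid to !svm_sev_enabled(): with a modular PSP driver and built-in KVM, CONFIG_KVM_AMD_SEV ends up disabled while CPUID can still report a nonzero ASID count. A sketch of what the helper checks (assumed from arch/x86/kvm/svm/svm.h of this era):

	static inline bool svm_sev_enabled(void)
	{
		/* SEV needs both compile-time support and a nonzero ASID
		 * range; the CPUID value alone is not enough. */
		return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? !!max_sev_asid : false;
	}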
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
@@ -3276,7 +3276,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 }
 
-bool __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
@@ -3330,13 +3330,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-	local_irq_enable();
-
 	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
@@ -3366,8 +3361,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	reload_tss(vcpu);
 
-	local_irq_disable();
-
 	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
@@ -3411,7 +3404,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	mark_all_clean(svm->vmcb);
 }
-STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 
 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 {
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -35,7 +36,6 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
-	mov  %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
@@ -78,6 +78,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_AX
 
 	/* Enter guest mode */
+	sti
 1:	vmload %_ASM_AX
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
@@ -99,6 +100,13 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(5b, 6b)
 7:
+	cli
+
+#ifdef CONFIG_RETPOLINE
+	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
 	/* "POP" @regs to RAX. */
 	pop %_ASM_AX
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
@@ -5533,8 +5533,25 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
 	return 1 & (b >> (field & 7));
 }
 
+static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
+{
+	u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
+
+	if (nested_cpu_has_mtf(vmcs12))
+		return true;
+
+	/*
+	 * An MTF VM-exit may be injected into the guest by setting the
+	 * interruption-type to 7 (other event) and the vector field to 0. Such
+	 * is the case regardless of the 'monitor trap flag' VM-execution
+	 * control.
+	 */
+	return entry_intr_info == (INTR_INFO_VALID_MASK
+				   | INTR_TYPE_OTHER_EVENT);
+}
+
 /*
- * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * Return true if we should exit from L2 to L1 to handle an exit, or false if we
  * should handle it ourselves in L0 (and then continue L2). Only call this
  * when in is_guest_mode (L2).
  */
@@ -5633,7 +5650,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 	case EXIT_REASON_MWAIT_INSTRUCTION:
 		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
 	case EXIT_REASON_MONITOR_TRAP_FLAG:
-		return nested_cpu_has_mtf(vmcs12);
+		return nested_vmx_exit_handled_mtf(vmcs12);
 	case EXIT_REASON_MONITOR_INSTRUCTION:
 		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
 	case EXIT_REASON_PAUSE_INSTRUCTION:
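For reference, the injection path the new helper recognizes: L1 requests an MTF VM-exit for L2 by programming the VM-entry interruption-information field with a valid "other event" of vector 0. An illustrative snippet (hypothetical function; constants as in the kernel's VMX headers, where INTR_INFO_VALID_MASK is bit 31 and INTR_TYPE_OTHER_EVENT encodes type 7 in bits 10:8):

	static void l1_request_mtf_exit(struct vmcs12 *vmcs12)
	{
		/* valid (bit 31) | type 7 "other event" | vector 0 */
		vmcs12->vm_entry_intr_info_field =
			INTR_INFO_VALID_MASK | INTR_TYPE_OTHER_EVENT;
	}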
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
@@ -4572,7 +4572,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  */
 static void kvm_machine_check(void)
 {
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_MCE)
 	struct pt_regs regs = {
 		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
 		.flags = X86_EFLAGS_IF,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -3060,6 +3060,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_PERF_CTL:
 	case MSR_AMD64_DC_CFG:
 	case MSR_F15H_EX_CFG:
+	/*
+	 * Intel Sandy Bridge CPUs must support the RAPL (running average power
+	 * limit) MSRs. Just return 0, as we do not want to expose the host
+	 * data here. Do not conditionalize this on CPUID, as KVM does not do
+	 * so for existing CPU-specific MSRs.
+	 */
+	case MSR_RAPL_POWER_UNIT:
+	case MSR_PP0_ENERGY_STATUS:	/* Power plane 0 (core) */
+	case MSR_PP1_ENERGY_STATUS:	/* Power plane 1 (graphics uncore) */
+	case MSR_PKG_ENERGY_STATUS:	/* Total package */
+	case MSR_DRAM_ENERGY_STATUS:	/* DRAM controller */
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -5049,10 +5060,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
 			goto out;
+		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit_out;
 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
+set_pit_out:
+		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_GET_PIT2: {
@@ -5072,10 +5086,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
 			goto out;
+		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit2_out;
 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
+set_pit2_out:
+		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_REINJECT_CONTROL: {
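The SET_PIT/SET_PIT2 changes close a classic check-then-use race: the NULL test on kvm->arch.vpit and the PIT update were not atomic with respect to a concurrent KVM_CREATE_IRQCHIP/KVM_CREATE_PIT2 running under kvm->lock. An illustrative interleaving (comments only, not kernel code):

	/*
	 *   ioctl thread A (KVM_SET_PIT2)     ioctl thread B (KVM_CREATE_IRQCHIP)
	 *   -----------------------------     -----------------------------------
	 *   if (!kvm->arch.vpit)  // unlocked
	 *                                     mutex_lock(&kvm->lock);
	 *                                     ... creates irqchip/PIT state ...
	 *                                     mutex_unlock(&kvm->lock);
	 *   kvm_vm_ioctl_set_pit2()  // may observe half-constructed state
	 *
	 * Taking kvm->lock around both the check and the update makes them one
	 * atomic step relative to thread B.
	 */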
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -1048,7 +1048,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 			start = slot + 1;
 	}
 
-	if (gfn >= memslots[start].base_gfn &&
+	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
 		atomic_set(&slots->lru_slot, start);
 		return &memslots[start];
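Why the bounds check is needed: memslots are kept sorted by base_gfn in descending order, and when gfn lies below every slot the binary search exits with start == used_slots, one past the last entry. A small self-contained model of the search (illustrative, not the kernel code):

	#include <stdio.h>

	/* Binary search over base_gfn values sorted in descending order,
	 * mirroring the loop in search_memslots(). */
	static int approx_search(const unsigned long *base_gfn, int used_slots,
				 unsigned long gfn)
	{
		int start = 0, end = used_slots;

		while (start < end) {
			int slot = start + (end - start) / 2;

			if (gfn >= base_gfn[slot])
				end = slot;
			else
				start = slot + 1;
		}
		return start;	/* can be == used_slots: one past the array */
	}

	int main(void)
	{
		unsigned long base_gfn[] = { 0x3000, 0x2000, 0x1000 };

		/* gfn below every slot: prints 3, which equals used_slots and
		 * would index out of bounds without the added check. */
		printf("%d\n", approx_search(base_gfn, 3, 0x500));
		return 0;
	}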