Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-01 02:36:02 +00:00)
KVM fixes for 6.10
- Fix a "shift too big" goof in the KVM_SEV_INIT2 selftest. - Compute the max mappable gfn for KVM selftests on x86 using GuestMaxPhyAddr from KVM's supported CPUID (if it's available). - Fix a race in kvm_vcpu_on_spin() by ensuring loads and stores are atomic. - Fix technically benign bug in __kvm_handle_hva_range() where KVM consumes the return from a void-returning function as if it were a boolean. -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEKTobbabEP7vbhhN9OlYIJqCjN/0FAmZ0tUIACgkQOlYIJqCj N/0bNQ//etfWk8SWCeOQ2ir83es04/i57Rz0L5L+d1C58IznwwbuRZdYaMpldb/B Wx8J4mhfmjdd1Q3HeqWqkpDATNBIkTx3Cp0ydyM41mCMgOuL2uz7o9CDf0VG6IPN j+9X91IEbfZ/h2k7qlrHVePY6P4HASsGFkYnc/3q7A8nA3jhZPMUwlzX4v02V3Ib x5MvtLxtuA4V8feAETNMVwFk2DxPXZV8NQAi6RNnPKF8ui8hmkaMPk1ysj3JaqGN bgsfAJQz3+uL5IR/cQQvjKGDFwL6TkE2mHnuziYQAMR9+ir7EIN+88xW/PZYkCHN Bh1pgtv6quCP33MlC2gjUwDLxbLWPuS0zsGe/QOABRrY+95gngS6/DgYIA7tN/ye VjWS3LHEQDaOa6AumKJqhi90WYNICRI3wi/4Bk3so3Oj/lvMisnizeTMKKGSPyU1 FhW6JUYQlcbmTS6aKGz7WoZxCv73Pild9Vz9ZqsW93aKIgJqeUEpfpMeHVg1DO8n /YXBCkYqm2ni6yTeoDxHiXJt+ecwKrZdjOe0Rwhmcybyux82ig98ISq+ZEtptSQW rEpa7wJ6Vb9Kv5Tzf5bKjb2MIzRkMFJgnRjr97taf4LLL4z1WyQm90OSBtqTgU8i 1R6Fy/M8hgE5D/fHOy8SZ63osLVlnnxbX6Fu1LebqxaQcrmKzcU= =Qdo9 -----END PGP SIGNATURE----- Merge tag 'kvm-x86-fixes-6.10-rcN' of https://github.com/kvm-x86/linux into HEAD KVM fixes for 6.10 - Fix a "shift too big" goof in the KVM_SEV_INIT2 selftest. - Compute the max mappable gfn for KVM selftests on x86 using GuestMaxPhyAddr from KVM's supported CPUID (if it's available). - Fix a race in kvm_vcpu_on_spin() by ensuring loads and stores are atomic. - Fix technically benign bug in __kvm_handle_hva_range() where KVM consumes the return from a void-returning function as if it were a boolean.
commit dee67a94d4
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -277,6 +277,7 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
 #define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
 #define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_GUEST_MAX_PHY_ADDR	KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
 #define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
 #define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
 
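The new property names bits 23:16 of CPUID.0x80000008:EAX, where the guest physical address size is enumerated (0 if unenumerated). Below is a minimal standalone sketch of the same bit extraction, under the assumption that raw host CPUID is representative; the selftests instead read KVM's supported CPUID via kvm_cpu_property().

/*
 * Hypothetical standalone program: extract the bit range that
 * KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23) describes, from raw
 * host CPUID rather than from KVM's supported CPUID.
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t guest_max_phy_addr(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the extended leaf doesn't exist. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 0;
	return (eax >> 16) & 0xff;	/* bits 23:16 */
}

int main(void)
{
	printf("GuestMaxPhyAddr: %u bits (0 = unenumerated)\n",
	       guest_max_phy_addr());
	return 0;
}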
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1247,9 +1247,20 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
 {
 	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
 	unsigned long ht_gfn, max_gfn, max_pfn;
-	uint8_t maxphyaddr;
+	uint8_t maxphyaddr, guest_maxphyaddr;
 
-	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+	/*
+	 * Use "guest MAXPHYADDR" from KVM if it's available.  Guest MAXPHYADDR
+	 * enumerates the max _mappable_ GPA, which can be less than the raw
+	 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
+	 * doesn't support 5-level TDP.
+	 */
+	guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
+	guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
+	TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
+		    "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
+
+	max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
 
 	/* Avoid reserved HyperTransport region on AMD processors. */
 	if (!host_cpu_is_amd)
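One detail worth calling out in the hunk above: `guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;` uses the GCC/Clang binary conditional extension, where `x ?: y` means `x ? x : y` with `x` evaluated only once, so a zero (unenumerated) property falls back to the raw MAXPHYADDR. A tiny self-contained illustration with made-up values:

/* Standalone sketch of the GNU "elvis" operator used above. */
#include <assert.h>

static unsigned int pick(unsigned int preferred, unsigned int fallback)
{
	return preferred ?: fallback;	/* GNU C extension, also in Clang */
}

int main(void)
{
	assert(pick(48, 52) == 48);	/* nonzero: keep the preferred value */
	assert(pick(0, 52) == 52);	/* zero means "unenumerated": fall back */
	return 0;
}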
--- a/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
@@ -105,11 +105,11 @@ void test_features(uint32_t vm_type, uint64_t supported_features)
 	int i;
 
 	for (i = 0; i < 64; i++) {
-		if (!(supported_features & (1u << i)))
+		if (!(supported_features & BIT_ULL(i)))
 			test_init2_invalid(vm_type,
 				&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) },
 				"unknown feature");
-		else if (KNOWN_FEATURES & (1u << i))
+		else if (KNOWN_FEATURES & BIT_ULL(i))
 			test_init2(vm_type,
 				&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) });
 	}
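This is the "shift too big" goof from the summary: `supported_features` is 64 bits wide, but `(1u << i)` shifts a 32-bit value, which is undefined behavior for i >= 32, so feature bits 32-63 were never tested correctly. A minimal reproduction sketch, with a hypothetical feature mask:

/*
 * Sketch of the bug being fixed: shifting a 32-bit value by >= 32 bits
 * is undefined behavior in C, so (1u << i) cannot scan a 64-bit mask.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))	/* 64-bit-safe, mirroring the kernel helper */

int main(void)
{
	uint64_t supported = 1ULL << 40;	/* a feature bit above bit 31 */
	int i = 40;

	/* (1u << 40) is UB; UBSan reports something like "shift exponent 40
	 * is too large for 32-bit type". BIT_ULL(i) is well-defined. */
	printf("BIT_ULL: %s\n", (supported & BIT_ULL(i)) ? "found" : "missed");
	return 0;
}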
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -651,7 +651,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 					range->on_lock(kvm);
 
 				if (IS_KVM_NULL_FN(range->handler))
-					break;
+					goto mmu_unlock;
 			}
 			r.ret |= range->handler(kvm, &gfn_range);
 		}
@@ -660,6 +660,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	if (range->flush_on_ret && r.ret)
 		kvm_flush_remote_tlbs(kvm);
 
+mmu_unlock:
 	if (r.found_memslot)
 		KVM_MMU_UNLOCK(kvm);
 
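The two hunks above work together: `break` only left the innermost memslot loop, so when the handler was null KVM would pointlessly keep iterating before finally unlocking, whereas `goto mmu_unlock` jumps straight to the unlock path via the new label. A toy example (not kernel code) of the underlying C pitfall:

/*
 * Illustration of why `break` was insufficient: it exits only the
 * innermost loop, so the outer iteration continues needlessly.
 */
#include <stdio.h>

int main(void)
{
	int outer, inner, visits = 0;

	for (outer = 0; outer < 3; outer++) {
		for (inner = 0; inner < 3; inner++) {
			visits++;
			if (outer == 0 && inner == 0)
				break;		/* leaves only the inner loop */
		}
	}
	printf("visits with break: %d\n", visits);	/* prints 7, not 1 */
	return 0;
}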
@@ -4025,12 +4026,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
-	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+	int last_boosted_vcpu;
 	unsigned long i;
 	int yielded = 0;
 	int try = 3;
 	int pass;
 
+	last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
 	kvm_vcpu_set_in_spin_loop(me, true);
 	/*
 	 * We boost the priority of a VCPU that is runnable but not
@@ -4068,7 +4070,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 
 			yielded = kvm_vcpu_yield_to(vcpu);
 			if (yielded > 0) {
-				kvm->last_boosted_vcpu = i;
+				WRITE_ONCE(kvm->last_boosted_vcpu, i);
 				break;
 			} else if (yielded < 0) {
 				try--;
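`last_boosted_vcpu` is deliberately read and written without holding a lock, so the fix marks both accesses with READ_ONCE()/WRITE_ONCE() to guarantee single, untorn loads and stores and to annotate the intentional data race for tools like KCSAN. A rough userspace approximation of the pattern, assuming simplified macros that only cover aligned scalar types:

/*
 * Userspace sketch of the READ_ONCE/WRITE_ONCE pattern (simplified;
 * the kernel macros handle more cases than this volatile cast does).
 */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int last_boosted;	/* shared, intentionally updated without a lock */

static void *spinner(void *arg)
{
	int snapshot = READ_ONCE(last_boosted);	/* one untorn load */

	WRITE_ONCE(last_boosted, snapshot + 1);	/* one untorn store */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, spinner, NULL);
	pthread_join(t, NULL);
	printf("last_boosted = %d\n", READ_ONCE(last_boosted));
	return 0;
}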