KVM: x86: Cache CPUID.0xD XSTATE offsets+sizes during module init
Snapshot the output of CPUID.0xD.[1..n] during kvm.ko initialization to avoid the overhead of CPUID during runtime. The offset, size, and metadata for CPUID.0xD.[1..n] sub-leaves do not depend on XCR0 or XSS values, i.e. are constant for a given CPU, and thus can be cached during module load.

On Intel's Emerald Rapids, CPUID is *wildly* expensive, to the point where recomputing XSAVE offsets and sizes results in a 4x increase in latency of nested VM-Enter and VM-Exit (nested transitions can trigger xstate_required_size() multiple times per transition), relative to using cached values. The issue is easily visible by running `perf top` while triggering nested transitions: kvm_update_cpuid_runtime() shows up at a whopping 50%.

As measured via RDTSC from L2 (using KVM-Unit-Test's CPUID VM-Exit test and a slightly modified L1 KVM to handle CPUID in the fastpath), a nested roundtrip to emulate CPUID on Skylake (SKX), Icelake (ICX), and Emerald Rapids (EMR) takes:

  SKX 11650
  ICX 22350
  EMR 28850

Using cached values, the latency drops to:

  SKX  6850
  ICX  9000
  EMR  7900

The underlying issue is that CPUID itself is slow on ICX, and comically slow on EMR. The problem is exacerbated on CPUs which support XSAVES and/or XSAVEC, as KVM invokes xstate_required_size() twice on each runtime CPUID update, and because there are more supported XSAVE features (CPUID for supported XSAVE feature sub-leaves is significantly slower).

 SKX:
  CPUID.0xD.2  = 348 cycles
  CPUID.0xD.3  = 400 cycles
  CPUID.0xD.4  = 276 cycles
  CPUID.0xD.5  = 236 cycles
  <other sub-leaves are similar>

 EMR:
  CPUID.0xD.2  = 1138 cycles
  CPUID.0xD.3  = 1362 cycles
  CPUID.0xD.4  = 1068 cycles
  CPUID.0xD.5  = 910 cycles
  CPUID.0xD.6  = 914 cycles
  CPUID.0xD.7  = 1350 cycles
  CPUID.0xD.8  = 734 cycles
  CPUID.0xD.9  = 766 cycles
  CPUID.0xD.10 = 732 cycles
  CPUID.0xD.11 = 718 cycles
  CPUID.0xD.12 = 734 cycles
  CPUID.0xD.13 = 1700 cycles
  CPUID.0xD.14 = 1126 cycles
  CPUID.0xD.15 = 898 cycles
  CPUID.0xD.16 = 716 cycles
  CPUID.0xD.17 = 748 cycles
  CPUID.0xD.18 = 776 cycles

Note, updating runtime CPUID information multiple times per nested transition is itself a flaw, especially since CPUID is a mandatory intercept on both Intel and AMD. E.g. KVM doesn't need to ensure emulated CPUID state is up-to-date while running L2. That flaw will be fixed in a future patch, as deferring runtime CPUID updates is more subtle than it appears at first glance, the benefits aren't super critical to have once the XSAVE issue is resolved, and caching CPUID output is desirable even if KVM's updates are deferred.

Cc: Jim Mattson <jmattson@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-ID: <20241211013302.1347853-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3154bddf8c
commit 1201f226c8
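For reference, a minimal userspace sketch (not part of the patch) of the measurement idea described above: timing CPUID.0xD sub-leaves with RDTSC. It does not reproduce the from-L2 setup (KVM-Unit-Tests plus a modified L1 KVM), only the raw bare-metal CPUID cost, and the 2..18 range simply mirrors the EMR list above; a robust tool would derive the valid sub-leaves from CPUID.0xD.0.

/* Rough bare-metal timing of CPUID.0xD sub-leaves; compile with gcc -O2. */
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>
#include <x86intrin.h>

int main(void)
{
        for (unsigned int sub = 2; sub <= 18; sub++) {
                unsigned int eax, ebx, ecx, edx;
                uint64_t best = UINT64_MAX;

                /* Take the minimum of several runs to filter out noise. */
                for (int i = 0; i < 100; i++) {
                        uint64_t start = __rdtsc();
                        __cpuid_count(0xD, sub, eax, ebx, ecx, edx);
                        uint64_t cycles = __rdtsc() - start;
                        if (cycles < best)
                                best = cycles;
                }
                printf("CPUID.0xD.%u = %llu cycles\n", sub,
                       (unsigned long long)best);
        }
        return 0;
}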
arch/x86/kvm/cpuid.c
@@ -36,6 +36,26 @@
 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_cpu_caps);
 
+struct cpuid_xstate_sizes {
+        u32 eax;
+        u32 ebx;
+        u32 ecx;
+};
+
+static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;
+
+void __init kvm_init_xstate_sizes(void)
+{
+        u32 ign;
+        int i;
+
+        for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
+                struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
+
+                cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
+        }
+}
+
 u32 xstate_required_size(u64 xstate_bv, bool compacted)
 {
         int feature_bit = 0;
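As a concrete illustration of what the loop above snapshots, the entry cached for XFEATURE_YMM (sub-leaf 2) on a typical CPU would hold the values below. These are the architecturally standard numbers for AVX state, shown for illustration; the patch itself hard-codes nothing.

#include <stdint.h>

/* Mirrors the struct added by the patch (kernel types swapped for stdint). */
struct cpuid_xstate_sizes { uint32_t eax, ebx, ecx; };

static const struct cpuid_xstate_sizes ymm_example = {
        .eax = 256,     /* size: 16 YMM_Hi128 registers * 16 bytes */
        .ebx = 576,     /* standard-format offset: 512-byte legacy area + 64-byte header */
        .ecx = 0,       /* bit 1 clear: no 64-byte alignment in compacted format */
};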
@@ -44,14 +64,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
         xstate_bv &= XFEATURE_MASK_EXTEND;
         while (xstate_bv) {
                 if (xstate_bv & 0x1) {
-                        u32 eax, ebx, ecx, edx, offset;
-                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
+                        struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
+                        u32 offset;
+
                         /* ECX[1]: 64B alignment in compacted form */
                         if (compacted)
-                                offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
+                                offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
                         else
-                                offset = ebx;
-                        ret = max(ret, offset + eax);
+                                offset = xs->ebx;
+                        ret = max(ret, offset + xs->eax);
                 }
 
                 xstate_bv >>= 1;
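To see the cached-lookup pattern end to end, here is a self-contained userspace sketch modeled on the hunk above. It is illustrative only: the 576-byte base, the ALIGN64 helper, the hard-coded XFEATURE_MAX, and main() are all local to the example rather than taken from the kernel.

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define XFEATURE_YMM    2
#define XFEATURE_MAX    19      /* hard-coded here; the kernel enumerates this */
#define ALIGN64(x)      (((x) + 63u) & ~63u)

struct cpuid_xstate_sizes { uint32_t eax, ebx, ecx; };
static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX];

/* Snapshot the per-feature size/offset sub-leaves once, up front. */
static void init_xstate_sizes(void)
{
        unsigned int ign;

        for (int i = XFEATURE_YMM; i < XFEATURE_MAX; i++) {
                struct cpuid_xstate_sizes *xs = &xstate_sizes[i];

                __cpuid_count(0xD, i, xs->eax, xs->ebx, xs->ecx, ign);
        }
}

static uint32_t xstate_required_size(uint64_t xstate_bv, int compacted)
{
        uint32_t ret = 512 + 64;        /* legacy region + XSAVE header */
        int feature_bit = 0;

        /* Walk only the extended features (bits 2 and up). */
        xstate_bv &= ~(uint64_t)0x3;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
                        uint32_t offset;

                        /* ECX[1]: 64B alignment in compacted form */
                        if (compacted)
                                offset = (xs->ecx & 0x2) ? ALIGN64(ret) : ret;
                        else
                                offset = xs->ebx;
                        if (offset + xs->eax > ret)
                                ret = offset + xs->eax;
                }
                xstate_bv >>= 1;
                feature_bit++;
        }
        return ret;
}

int main(void)
{
        init_xstate_sizes();
        /* e.g. bytes needed for x87+SSE+AVX in the standard format */
        printf("%u bytes\n", xstate_required_size((1 << XFEATURE_YMM) | 0x3, 0));
        return 0;
}

On a CPU with the standard layout, enabling x87+SSE+AVX in non-compacted form yields 576 + 256 = 832 bytes, i.e. the offset plus size of the last enabled component, exactly as the kernel function computes it from the cache.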
arch/x86/kvm/cpuid.h
@@ -31,6 +31,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
                u32 *ecx, u32 *edx, bool exact_only);
 
+void __init kvm_init_xstate_sizes(void);
 u32 xstate_required_size(u64 xstate_bv, bool compacted);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
arch/x86/kvm/x86.c
@@ -13997,6 +13997,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
 
 static int __init kvm_x86_init(void)
 {
+        kvm_init_xstate_sizes();
+
         kvm_mmu_x86_module_init();
         mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
         return 0;