LoongArch: KVM: Add PV steal time support in host side
Add the ParaVirt steal time feature on the host side. The VM can query the features provided by the KVM hypervisor; a feature bit KVM_FEATURE_STEAL_TIME is added here. Like on x86, the steal time structure is saved in guest memory, and a hypercall function KVM_HCALL_FUNC_NOTIFY is added for the guest to notify KVM to enable the feature. A vCPU attribute ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is also added to save and restore the base address of the steal time structure when a VM is migrated.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent d7ad41a31d
commit b4ba157044
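For context, a sketch of how a guest could consume this interface (guest-side enablement is a separate patch; apart from the constants and the register convention defined by this commit, everything below is hypothetical):

	/* Hypothetical guest-side probe and registration, per-CPU in practice.
	 * Only CPUCFG_KVM_FEATURE, KVM_FEATURE_STEAL_TIME, KVM_HCALL_FUNC_NOTIFY
	 * and KVM_STEAL_PHYS_VALID are defined by this commit. */
	static struct kvm_steal_time steal __aligned(64);	/* GPA bits 5:0 must be zero */

	static void pv_steal_time_init(void)
	{
		if (!(read_cpucfg(CPUCFG_KVM_FEATURE) & KVM_FEATURE_STEAL_TIME))
			return;

		/* The host reads the feature id from a1 and the address from a2,
		 * see kvm_save_notify() in the exit.c hunk below. */
		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME,
			       __pa(&steal) | KVM_STEAL_PHYS_VALID);
	}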
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
@@ -31,6 +31,7 @@
 #define KVM_HALT_POLL_NS_DEFAULT	500000
 #define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
+#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
 
 #define KVM_GUESTDBG_SW_BP_MASK		\
 	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -206,6 +207,13 @@ struct kvm_vcpu_arch {
 	struct kvm_mp_state mp_state;
 	/* cpucfg */
 	u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+
+	/* paravirt steal time */
+	struct {
+		u64 guest_addr;
+		u64 last_steal;
+		struct gfn_to_hva_cache cache;
+	} st;
 };
 
 static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
@@ -14,6 +14,7 @@
 
 #define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
 #define KVM_HCALL_FUNC_IPI		1
+#define KVM_HCALL_FUNC_NOTIFY		2
 
 #define KVM_HCALL_SWDBG			HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
 
@@ -24,6 +25,16 @@
 #define KVM_HCALL_INVALID_CODE		-1UL
 #define KVM_HCALL_INVALID_PARAMETER	-2UL
 
+#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
+#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)
+
+struct kvm_steal_time {
+	__u64 steal;
+	__u32 version;
+	__u32 flags;
+	__u32 pad[12];
+};
+
 /*
  * Hypercall interface for KVM hypervisor
  *
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -120,4 +120,9 @@ static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
 	vcpu->arch.gprs[num] = val;
 }
 
+static inline bool kvm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
 #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
@@ -169,6 +169,7 @@
 #define  KVM_SIGNATURE			"KVM\0"
 #define  CPUCFG_KVM_FEATURE		(CPUCFG_KVM_BASE + 4)
 #define  KVM_FEATURE_IPI		BIT(1)
+#define  KVM_FEATURE_STEAL_TIME		BIT(2)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
@@ -81,7 +81,11 @@ struct kvm_fpu {
 #define LOONGARCH_REG_64(TYPE, REG)	(TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
 #define KVM_IOC_CSRID(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
 #define KVM_IOC_CPUCFG(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vcpu fd */
 #define KVM_LOONGARCH_VCPU_CPUCFG	0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL	1
+#define  KVM_LOONGARCH_VCPU_PVTIME_GPA	0
 
 struct kvm_debug_exit_arch {
 };
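With the attribute group above, a VMM can save and restore the steal time base address around migration. A minimal sketch, assuming src_vcpu_fd and dst_vcpu_fd are open KVM vCPU file descriptors (the helper itself is illustrative; only the KVM_LOONGARCH_VCPU_PVTIME_* constants come from this commit):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Read the steal time GPA on the source vCPU, write it back on the target. */
	static int pvtime_migrate_gpa(int src_vcpu_fd, int dst_vcpu_fd)
	{
		__u64 gpa;
		struct kvm_device_attr attr = {
			.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
			.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
			.addr  = (__u64)(unsigned long)&gpa,
		};

		if (ioctl(src_vcpu_fd, KVM_GET_DEVICE_ATTR, &attr))
			return -1;
		return ioctl(dst_vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}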
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
 	select KVM_MMIO
 	select HAVE_KVM_READONLY_MEM
 	select KVM_XFER_TO_GUEST_WORK
+	select SCHED_INFO
 	help
 	  Support hosting virtualized guest machines using
 	  hardware virtualization extensions. You will need
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
@@ -24,7 +24,7 @@
 static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 {
 	int rd, rj;
-	unsigned int index;
+	unsigned int index, ret;
 
 	if (inst.reg2_format.opcode != cpucfg_op)
 		return EMULATE_FAIL;
@@ -50,7 +50,10 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
 		break;
 	case CPUCFG_KVM_FEATURE:
-		vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
+		ret = KVM_FEATURE_IPI;
+		if (kvm_pvtime_supported())
+			ret |= KVM_FEATURE_STEAL_TIME;
+		vcpu->arch.gprs[rd] = ret;
 		break;
 	default:
 		vcpu->arch.gprs[rd] = 0;
@@ -687,6 +690,34 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static long kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+	unsigned long id, data;
+
+	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+	switch (id) {
+	case KVM_FEATURE_STEAL_TIME:
+		if (!kvm_pvtime_supported())
+			return KVM_HCALL_INVALID_CODE;
+
+		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+			return KVM_HCALL_INVALID_PARAMETER;
+
+		vcpu->arch.st.guest_addr = data;
+		if (!(data & KVM_STEAL_PHYS_VALID))
+			break;
+
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+		break;
+	default:
+		break;
+	};
+
+	return 0;
+};
+
 /*
  * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
  * @vcpu:	Virtual CPU context.
@@ -758,6 +789,9 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
 		kvm_send_pv_ipi(vcpu);
 		ret = KVM_HCALL_SUCCESS;
 		break;
+	case KVM_HCALL_FUNC_NOTIFY:
+		ret = kvm_save_notify(vcpu);
+		break;
 	default:
 		ret = KVM_HCALL_INVALID_CODE;
 		break;
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
@@ -31,6 +31,50 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+	u32 version;
+	u64 steal;
+	gpa_t gpa;
+	struct kvm_memslots *slots;
+	struct kvm_steal_time __user *st;
+	struct gfn_to_hva_cache *ghc;
+
+	ghc = &vcpu->arch.st.cache;
+	gpa = vcpu->arch.st.guest_addr;
+	if (!(gpa & KVM_STEAL_PHYS_VALID))
+		return;
+
+	gpa &= KVM_STEAL_PHYS_MASK;
+	slots = kvm_memslots(vcpu->kvm);
+	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+			ghc->gpa = INVALID_GPA;
+			return;
+		}
+	}
+
+	st = (struct kvm_steal_time __user *)ghc->hva;
+	unsafe_get_user(version, &st->version, out);
+	if (version & 1)
+		version += 1; /* first time write, random junk */
+
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
+	smp_wmb();
+
+	unsafe_get_user(steal, &st->steal, out);
+	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+	unsafe_put_user(steal, &st->steal, out);
+
+	smp_wmb();
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
+out:
+	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
 /*
  * kvm_check_requests - check and handle pending vCPU requests
  *
@@ -48,6 +92,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
 	if (kvm_dirty_ring_check_request(vcpu))
 		return RESUME_HOST;
 
+	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+		kvm_update_stolen_time(vcpu);
+
 	return RESUME_GUEST;
 }
 
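The version field in kvm_update_stolen_time() acts as a seqcount: the host makes it odd before updating steal and even again afterwards, with smp_wmb() ordering the stores, so the guest can detect and retry a torn read. A hedged sketch of the matching guest-side reader (not part of this commit); the remaining vcpu.c hunks follow:

	static u64 read_steal_time(struct kvm_steal_time *st)
	{
		u32 version;
		u64 steal;

		do {
			version = READ_ONCE(st->version);
			smp_rmb();	/* pairs with the host's smp_wmb() */
			steal = READ_ONCE(st->steal);
			smp_rmb();
		} while ((version & 1) || version != READ_ONCE(st->version));

		return steal;
	}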
@@ -690,6 +737,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
 	return -ENXIO;
 }
 
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	if (!kvm_pvtime_supported() ||
+			attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	return 0;
+}
+
 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
 				       struct kvm_device_attr *attr)
 {
@@ -699,6 +756,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
 	case KVM_LOONGARCH_VCPU_CPUCFG:
 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
 		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+		break;
 	default:
 		break;
 	}
@@ -706,7 +766,7 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
 					 struct kvm_device_attr *attr)
 {
 	int ret = 0;
@@ -722,6 +782,23 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	u64 gpa;
+	u64 __user *user = (u64 __user *)attr->addr;
+
+	if (!kvm_pvtime_supported() ||
+			attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	gpa = vcpu->arch.st.guest_addr;
+	if (put_user(gpa, user))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
 				       struct kvm_device_attr *attr)
 {
@@ -729,7 +806,10 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
 
 	switch (attr->group) {
 	case KVM_LOONGARCH_VCPU_CPUCFG:
-		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
 		break;
 	default:
 		break;
@@ -744,6 +824,43 @@ static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
 	return -ENXIO;
 }
 
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	int idx, ret = 0;
+	u64 gpa, __user *user = (u64 __user *)attr->addr;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (!kvm_pvtime_supported() ||
+			attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	if (get_user(gpa, user))
+		return -EFAULT;
+
+	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+		return -EINVAL;
+
+	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+		vcpu->arch.st.guest_addr = gpa;
+		return 0;
+	}
+
+	/* Check the address is in a valid memslot */
+	idx = srcu_read_lock(&kvm->srcu);
+	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+		ret = -EINVAL;
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	if (!ret) {
+		vcpu->arch.st.guest_addr = gpa;
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+	}
+
+	return ret;
+}
+
 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
 				       struct kvm_device_attr *attr)
 {
@@ -753,6 +870,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
 	case KVM_LOONGARCH_VCPU_CPUCFG:
 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
 		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+		break;
 	default:
 		break;
 	}
@@ -1113,6 +1233,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Control guest page CCA attribute */
 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
 	/* Don't bother restoring registers multiple times unless necessary */
 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)