RISC-V: KVM: Use SBI sync SRET call when available

Implement an optimized KVM world-switch using the SBI sync SRET call
when the SBI nested acceleration extension is available. This improves
the KVM world-switch when KVM RISC-V is running as a Guest under some
other hypervisor.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-12-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
Author:    Anup Patel, 2024-10-21 01:17:32 +05:30
Committer: Anup Patel
parent dab55604ae
commit 68c72a6557
3 changed files with 79 additions and 4 deletions
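The SBI Nested Acceleration (NACL) extension targets exactly this nested setup: KVM RISC-V itself runs as an SBI guest of the hypervisor below it, so every H-extension CSR access and every SRET into its own guest would otherwise trap to that lower hypervisor. With NACL, KVM shares a per-CPU memory area with the underlying SBI implementation, batches guest CSR and GPR state into it, and issues a single sync SRET request that synchronizes the state and enters the guest in one trap instead of many. The three hunks below touch the NACL header (arch/riscv/include/asm/kvm_nacl.h), the vCPU entry/exit path (arch/riscv/kvm/vcpu.c), and the low-level world-switch assembly (arch/riscv/kvm/vcpu_switch.S).

The availability checks used throughout the diff are static keys set up by the common NACL support added earlier in this series, not by this commit. A minimal sketch of how that detection presumably looks (the function below is illustrative, not the in-tree implementation; the real support code also registers the per-CPU shared memory with the NACL SET_SHMEM call):

#include <linux/jump_label.h>   /* DEFINE_STATIC_KEY_FALSE(), static_branch_enable() */
#include <asm/sbi.h>            /* sbi_probe_extension(), SBI_EXT_NACL */

DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);

/* Illustrative probe: flip the static key when the SBI implementation
 * underneath us advertises the NACL extension. */
static int __init kvm_riscv_nacl_probe_sketch(void)
{
        if (sbi_probe_extension(SBI_EXT_NACL) <= 0)
                return 0;       /* no NACL below us; every CSR access keeps trapping */

        static_branch_enable(&kvm_riscv_nacl_available);
        return 0;
}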


@@ -12,6 +12,8 @@
 #include <asm/csr.h>
 #include <asm/sbi.h>
 
+struct kvm_vcpu_arch;
+
 DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
 #define kvm_riscv_nacl_available() \
         static_branch_unlikely(&kvm_riscv_nacl_available)
@@ -43,6 +45,10 @@ void __kvm_riscv_nacl_hfence(void *shmem,
                              unsigned long page_num,
                              unsigned long page_count);
 
+void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
+                                unsigned long sbi_ext_id,
+                                unsigned long sbi_func_id);
+
 int kvm_riscv_nacl_enable(void);
 
 void kvm_riscv_nacl_disable(void);
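Two small header changes in arch/riscv/include/asm/kvm_nacl.h: a forward declaration of struct kvm_vcpu_arch, so the header does not have to pull in the full KVM definitions, and the prototype of __kvm_riscv_nacl_switch_to(), which unlike __kvm_riscv_switch_to() also takes the SBI extension and function IDs, so the same low-level routine could issue any NACL world-switch request (this commit only calls it with SYNC_SRET).

The vcpu.c hunk below also relies on kvm_riscv_nacl_sync_csr_available(), kvm_riscv_nacl_sync_sret_available() and kvm_riscv_nacl_autoswap_csr_available(). Those are not added by this commit; presumably they follow the same static-key pattern as kvm_riscv_nacl_available() at the top of this header, along these lines:

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
#define kvm_riscv_nacl_sync_csr_available() \
        static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
#define kvm_riscv_nacl_sync_sret_available() \
        static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
#define kvm_riscv_nacl_autoswap_csr_available() \
        static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available)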


@@ -766,19 +766,59 @@ static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *v
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+        void *nsh;
         struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
         struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;
 
         kvm_riscv_vcpu_swap_in_guest_state(vcpu);
         guest_state_enter_irqoff();
 
-        hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+        if (kvm_riscv_nacl_sync_sret_available()) {
+                nsh = nacl_shmem();
 
-        nsync_csr(-1UL);
+                if (kvm_riscv_nacl_autoswap_csr_available()) {
+                        hcntx->hstatus =
+                                nacl_csr_read(nsh, CSR_HSTATUS);
+                        nacl_scratch_write_long(nsh,
+                                                SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+                                                SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
+                                                gcntx->hstatus);
+                        nacl_scratch_write_long(nsh,
+                                                SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+                                                SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
+                } else if (kvm_riscv_nacl_sync_csr_available()) {
+                        hcntx->hstatus = nacl_csr_swap(nsh,
+                                                       CSR_HSTATUS, gcntx->hstatus);
+                } else {
+                        hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+                }
 
-        __kvm_riscv_switch_to(&vcpu->arch);
+                nacl_scratch_write_longs(nsh,
+                                         SBI_NACL_SHMEM_SRET_OFFSET +
+                                         SBI_NACL_SHMEM_SRET_X(1),
+                                         &gcntx->ra,
+                                         SBI_NACL_SHMEM_SRET_X_LAST);
+
+                __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
+                                           SBI_EXT_NACL_SYNC_SRET);
 
-        gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+                if (kvm_riscv_nacl_autoswap_csr_available()) {
+                        nacl_scratch_write_long(nsh,
+                                                SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+                                                0);
+                        gcntx->hstatus = nacl_scratch_read_long(nsh,
+                                                                SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+                                                                SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
+                } else {
+                        gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+                }
+        } else {
+                hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+
+                __kvm_riscv_switch_to(&vcpu->arch);
+
+                gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+        }
 
         vcpu->arch.last_exit_cpu = vcpu->cpu;
         guest_state_exit_irqoff();
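In kvm_riscv_vcpu_enter_exit() (arch/riscv/kvm/vcpu.c) the existing world-switch (swap HSTATUS, call __kvm_riscv_switch_to(), swap HSTATUS back) remains as the else branch, and a new fast path is taken when the SBI implementation underneath supports sync SRET. Before entering the guest, KVM arranges the HSTATUS swap: if CSR auto-swap is available, the guest HSTATUS value plus an enable flag are written into the auto-swap area of the shared memory so the SBI implementation swaps HSTATUS itself around the SRET; otherwise HSTATUS is swapped up front, through the shared memory when sync CSR is available, or with a plain csr_swap() as the last resort. KVM then stages the guest GPRs into the SRET area of the shared memory and calls __kvm_riscv_nacl_switch_to() with SBI_EXT_NACL / SBI_EXT_NACL_SYNC_SRET. After the guest traps back, the auto-swap flag is cleared and the guest HSTATUS is read back from the shared memory (or swapped back directly on the non-auto-swap path).

The nacl_scratch_write_longs() call copies the guest GPRs x1..x31, which sit contiguously in struct kvm_cpu_context starting at ->ra, into the per-GPR slots of the shared-memory SRET area, so the SBI implementation can load them when it performs the SRET on KVM's behalf. A conceptual sketch of what that single call amounts to (illustrative only, assuming one long-sized slot per GPR; this is not the in-tree helper):

/* Illustrative only: stage guest x1..x31 for the sync SRET request. */
static inline void stage_guest_gprs_sketch(void *shmem,
                                           struct kvm_cpu_context *gcntx)
{
        unsigned long *gpr = &gcntx->ra;        /* x1 (ra) is the first GPR staged */
        unsigned long i;

        for (i = 0; i < SBI_NACL_SHMEM_SRET_X_LAST; i++)
                nacl_scratch_write_long(shmem,
                                        SBI_NACL_SHMEM_SRET_OFFSET +
                                        SBI_NACL_SHMEM_SRET_X(1) +
                                        i * sizeof(unsigned long),
                                        gpr[i]);
}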


@@ -218,6 +218,35 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
         ret
 SYM_FUNC_END(__kvm_riscv_switch_to)
 
+/*
+ * Parameters:
+ * A0 <= Pointer to struct kvm_vcpu_arch
+ * A1 <= SBI extension ID
+ * A2 <= SBI function ID
+ */
+SYM_FUNC_START(__kvm_riscv_nacl_switch_to)
+        SAVE_HOST_GPRS
+
+        SAVE_HOST_AND_RESTORE_GUEST_CSRS .Lkvm_nacl_switch_return
+
+        /* Resume Guest using SBI nested acceleration */
+        add     a6, a2, zero
+        add     a7, a1, zero
+        ecall
+
+        /* Back to Host */
+        .align 2
+.Lkvm_nacl_switch_return:
+        SAVE_GUEST_GPRS
+
+        SAVE_GUEST_AND_RESTORE_HOST_CSRS
+
+        RESTORE_HOST_GPRS
+
+        /* Return to C code */
+        ret
+SYM_FUNC_END(__kvm_riscv_nacl_switch_to)
+
 SYM_CODE_START(__kvm_riscv_unpriv_trap)
         /*
          * We assume that faulting unpriv load/store instruction is
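The new __kvm_riscv_nacl_switch_to() routine in arch/riscv/kvm/vcpu_switch.S saves the host GPRs and CSRs exactly like __kvm_riscv_switch_to(), but it does not load the guest GPRs or execute SRET itself. Instead it places the function ID in a6 and the extension ID in a7, per the SBI calling convention (hence the two add instructions copying A2 and A1), and traps into the underlying SBI implementation with ecall. That implementation synchronizes the shared-memory CSR state, loads the staged guest GPRs and performs the SRET on KVM's behalf. When the guest next traps, control re-enters at .Lkvm_nacl_switch_return, the label passed to SAVE_HOST_AND_RESTORE_GUEST_CSRS (which programs the host trap vector to it), not at the instruction after the ecall; this is also why the request is issued from assembly, with all host state already saved, rather than from C.

For reference, a sketch of the same a6/a7 calling convention written as a C inline-asm wrapper (illustrative; it mirrors the kernel's sbi_ecall() helper and is not how the SYNC_SRET request is actually issued, since a sync SRET does not return to its caller in the normal way):

struct sbiret_sketch {
        long error;
        long value;
};

static inline struct sbiret_sketch sbi_call_sketch(unsigned long ext_id,
                                                   unsigned long func_id)
{
        register unsigned long a0 asm("a0") = 0;
        register unsigned long a1 asm("a1") = 0;
        register unsigned long a6 asm("a6") = func_id;
        register unsigned long a7 asm("a7") = ext_id;

        /* SBI calling convention: extension ID in a7, function ID in a6,
         * error and value returned in a0 and a1. */
        asm volatile("ecall"
                     : "+r" (a0), "+r" (a1)
                     : "r" (a6), "r" (a7)
                     : "memory");

        return (struct sbiret_sketch){ .error = a0, .value = a1 };
}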