Merge branch kvm-arm64/smccc-filter-cleanups into kvmarm/next

* kvm-arm64/smccc-filter-cleanups:
  : Cleanup the management of KVM's SMCCC maple tree
  :
  : Avoid the cost of maintaining the SMCCC filter maple tree if userspace
  : hasn't written a rule to the filter. While at it, rip out the now
  : unnecessary VM flag to indicate whether or not the SMCCC filter was
  : configured.
  KVM: arm64: Use mtree_empty() to determine if SMCCC filter configured
  KVM: arm64: Only insert reserved ranges when SMCCC filter is used
  KVM: arm64: Add a predicate for testing if SMCCC filter is configured

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 25a35c1a3d
Oliver Upton <oliver.upton@linux.dev>, 2023-10-30 20:18:37 +00:00
2 changed files with 23 additions and 15 deletions
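
The shape of the cleanup is easier to see outside the diff: kvm_arm_init_hypercalls() now only initialises an empty maple tree, the reserved architecture range is inserted the first time userspace writes a rule, and mtree_empty() replaces the KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED flag as the "is a filter configured?" test. Below is a condensed, illustrative sketch of that idiom, not the kernel code itself: the sketch_* names are made up, only the SMC32 architecture range is shown (the real code also covers SMC64), and the kvm->arch.config_lock locking is omitted.

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/types.h>
#include <linux/xarray.h>

static struct maple_tree sketch_filter;	/* stands in for kvm->arch.smccc_filter */

/* VM creation: initialise an empty tree, insert nothing yet. */
static void sketch_init(void)
{
	mt_init(&sketch_filter);
}

/* "Configured" now simply means "the tree holds at least one entry". */
static bool sketch_configured(void)
{
	return !mtree_empty(&sketch_filter);
}

/* The first rule written by userspace pays for the reserved range. */
static int sketch_add_rule(unsigned long start, unsigned long end, u8 action)
{
	int r;

	if (!sketch_configured()) {
		/* SMC32 architecture range, always handled by KVM */
		r = mtree_insert_range(&sketch_filter, 0x80000000, 0x8000ffff,
				       xa_mk_value(0 /* KVM_SMCCC_FILTER_HANDLE */),
				       GFP_KERNEL_ACCOUNT);
		if (r)
			return r;
	}

	return mtree_insert_range(&sketch_filter, start, end,
				  xa_mk_value(action), GFP_KERNEL_ACCOUNT);
}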


@@ -239,10 +239,8 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
 	/* Timer PPIs made immutable */
 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
-	/* SMCCC filter initialized for the VM */
-#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		7
 	/* Initial ID reg values loaded */
-#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		8
+#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
 	unsigned long flags;
 
 	/* VM-wide vCPU feature set */


@@ -133,12 +133,10 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
 					   ARM_SMCCC_SMC_64,		\
 					   0, ARM_SMCCC_FUNC_MASK)
 
-static void init_smccc_filter(struct kvm *kvm)
+static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
 {
 	int r;
 
-	mt_init(&kvm->arch.smccc_filter);
-
 	/*
 	 * Prevent userspace from handling any SMCCC calls in the architecture
 	 * range, avoiding the risk of misrepresenting Spectre mitigation status
@@ -148,14 +146,25 @@ static void init_smccc_filter(struct kvm *kvm)
 			       SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
 			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
 			       GFP_KERNEL_ACCOUNT);
-	WARN_ON_ONCE(r);
+	if (r)
+		goto out_destroy;
 
 	r = mtree_insert_range(&kvm->arch.smccc_filter,
 			       SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
 			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
 			       GFP_KERNEL_ACCOUNT);
-	WARN_ON_ONCE(r);
+	if (r)
+		goto out_destroy;
+
+	return 0;
+out_destroy:
+	mtree_destroy(&kvm->arch.smccc_filter);
+	return r;
+}
+
+static bool kvm_smccc_filter_configured(struct kvm *kvm)
+{
+	return !mtree_empty(&kvm->arch.smccc_filter);
 }
 
 static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
@@ -184,13 +193,14 @@ static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
 		goto out_unlock;
 	}
 
+	if (!kvm_smccc_filter_configured(kvm)) {
+		r = kvm_smccc_filter_insert_reserved(kvm);
+		if (WARN_ON_ONCE(r))
+			goto out_unlock;
+	}
+
 	r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
 			       xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
-	if (r)
-		goto out_unlock;
-
-	set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags);
-
 out_unlock:
 	mutex_unlock(&kvm->arch.config_lock);
 	return r;
@@ -201,7 +211,7 @@ static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
 	unsigned long idx = func_id;
 	void *val;
 
-	if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags))
+	if (!kvm_smccc_filter_configured(kvm))
 		return KVM_SMCCC_FILTER_HANDLE;
 
 	/*
@@ -387,7 +397,7 @@ void kvm_arm_init_hypercalls(struct kvm *kvm)
 	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
 	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
 
-	init_smccc_filter(kvm);
+	mt_init(&kvm->arch.smccc_filter);
 }
 
 void kvm_arm_teardown_hypercalls(struct kvm *kvm)
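
For context, the path that triggers the lazy insertion above is the KVM_ARM_VM_SMCCC_FILTER VM device attribute. The following is a minimal userspace sketch, assuming kernel uapi headers new enough to carry the SMCCC filter definitions; the chosen function ID and action are arbitrary examples, not part of this series.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * Install a single filter rule on an existing VM fd. The first successful
 * call of this kind is what now populates the reserved architecture ranges
 * in the maple tree.
 */
static int install_smccc_rule(int vm_fd)
{
	struct kvm_smccc_filter filter = {
		.base		= 0x84000000,	/* PSCI_VERSION, an arbitrary example */
		.nr_functions	= 1,
		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
	};
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VM_SMCCC_CTRL,
		.attr	= KVM_ARM_VM_SMCCC_FILTER,
		.addr	= (uint64_t)&filter,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}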