KVM/arm64 fixes for 6.4, take #2
- Address some fallout of the locking rework, this time affecting the
  way the vgic is configured

- Fix an issue where the page table walker frees a subtree and then
  proceeds with walking what it has just freed...

- Check that a given PA donated to the guest is actually memory (only
  affecting pKVM)

- Correctly handle MTE CMOs by Set/Way
-----BEGIN PGP SIGNATURE-----

iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmRuCOgPHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpD08wP/1g00EJei1G0ZHDhoWI/F7axx2ibmoEU1dkn
BYVgwC9y51O4h+sfMZw1nLLtOw65EPCMspAxgSxMXMBW09A2wRvj9HbKnlrtLi1W
Y3pkTuJwN4o8YX/R5pdMtt0gNI2odlq7YokXpYB5+CbmTZa1P12eZz21t3e+CQh9
dkSW/zPP4ihnSYtB/47OrRGYpyv6HKwPjU9VtRbKcbomO0SYPAiizyEy2IE3UABK
nBvt7mMXnlPpkLmhWwKYj4QuOoB3H48xXf059hbZLNcGWhqtQloE7TvvqILQ7PJW
n68r6cnmZpW/+so9Wn8+/iznFf5C/YyXUEx4NiZtm395VyJRe1LCCP07soBvlwHh
x+wPm2xDrHasFJeEg2f8wXpqM4LByHfyIV4KMdPg2dUMW8jkglETDyFWd4i0QzbH
Sovfkt2AnSJ/soUXlkZA+f5bf023Q5zXDBBlNZyxXf24fynglVdaKiRXbzutqt+s
pKAniy2UxHf9Yz0M9hR+LeJ8Cvchnq+7+ToEo5B1em9g17ngGnKzrWt22F57b8Ro
U3ZSLQedob5G8ykt4jEJuBlKwf6bDijwRlpwo8aCfMzs2iqthOJx/wjcj8HMONRF
A0stENROnCF2sz5ThJXfXNtLZk2gLrTk0p9EPhU7qDquKzTURauzI/uyFBsgeRxD
1RcxKUMd
=Uwqe
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-6.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
commit 26f3149880
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
  *
  * The walker will walk the page-table entries corresponding to the input
  * address range specified, visiting entries according to the walker flags.
- * Invalid entries are treated as leaf entries. Leaf entries are reloaded
- * after invoking the walker callback, allowing the walker to descend into
- * a newly installed table.
+ * Invalid entries are treated as leaf entries. The visited page table entry is
+ * reloaded after invoking the walker callback, allowing the walker to descend
+ * into a newly installed table.
  *
  * Returning a negative error code from the walker callback function will
  * terminate the walk immediately with the same error code.
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -115,8 +115,14 @@
 #define SB_BARRIER_INSN		__SYS_BARRIER_INSN(0, 7, 31)
 
 #define SYS_DC_ISW		sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW		sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW		sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW		sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW		sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW		sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW		sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW		sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW		sys_insn(1, 0, 7, 14, 6)
 
 /*
  * Automatically generated definitions for system registers, the
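For context on the new encodings: sys_insn() packs the (op0, op1, CRn, CRm, op2) operands of a system instruction into a single constant, so each MTE variant above differs from its classic counterpart only in op2. Below is a minimal standalone sketch of that packing; the shift values are assumptions mirroring the layout of the kernel's sys_reg() macro, not a copy of it.

#include <stdint.h>
#include <stdio.h>

/*
 * Toy version of the sys_reg()/sys_insn() operand packing, assuming
 * Op0 at bit 19, Op1 at 16, CRn at 12, CRm at 8, Op2 at 5.
 */
#define SYS_ENC(op0, op1, crn, crm, op2)                         \
        (((uint32_t)(op0) << 19) | ((uint32_t)(op1) << 16) |     \
         ((uint32_t)(crn) << 12) | ((uint32_t)(crm) << 8) |      \
         ((uint32_t)(op2) << 5))

int main(void)
{
        /* Clean by set/way and its MTE (tag) variants differ only in op2. */
        printf("DC CSW:   %#x\n", (unsigned)SYS_ENC(1, 0, 7, 10, 2));
        printf("DC CGSW:  %#x\n", (unsigned)SYS_ENC(1, 0, 7, 10, 4));
        printf("DC CGDSW: %#x\n", (unsigned)SYS_ENC(1, 0, 7, 10, 6));
        return 0;
}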
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -575,7 +575,7 @@ struct pkvm_mem_donation {
 
 struct check_walk_data {
 	enum pkvm_page_state	desired;
-	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
+	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte, u64 addr);
 };
 
 static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
@@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
 {
 	struct check_walk_data *d = ctx->arg;
 
-	if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
-		return -EINVAL;
-
-	return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
+	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
 }
 
 static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	return kvm_pgtable_walk(pgt, addr, size, &walker);
 }
 
-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
 {
+	if (!addr_is_allowed_memory(addr))
+		return PKVM_NOPAGE;
+
 	if (!kvm_pte_valid(pte) && pte)
 		return PKVM_NOPAGE;
 
@@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
 	return host_stage2_set_owner_locked(addr, size, host_id);
 }
 
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
 {
 	if (!kvm_pte_valid(pte))
 		return PKVM_NOPAGE;
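The effect of these hunks is that a PA outside the host's view of memory now classifies as PKVM_NOPAGE in the state helpers, instead of aborting the whole walk with -EINVAL. A toy sketch of the underlying range-check idea follows; mem_range and pa_is_memory are hypothetical stand-ins for the hyp's memblock lookup, not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the hyp's snapshot of host memory ranges. */
struct mem_range { uint64_t start, size; };

static const struct mem_range memory[] = {
        { 0x80000000ULL, 0x40000000ULL },       /* e.g. 1GiB of DRAM at 2GiB */
};

/* Sketch of an addr_is_allowed_memory()-style predicate. */
static bool pa_is_memory(uint64_t pa)
{
        for (size_t i = 0; i < sizeof(memory) / sizeof(memory[0]); i++)
                if (pa - memory[i].start < memory[i].size)
                        return true;
        return false;
}

int main(void)
{
        /* An MMIO address reads as "no page" rather than an error. */
        printf("0x09000000 is memory: %d\n", pa_is_memory(0x09000000ULL));
        printf("0x80000000 is memory: %d\n", pa_is_memory(0x80000000ULL));
        return 0;
}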
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.flags	= flags,
 	};
 	int ret = 0;
+	bool reload = false;
 	kvm_pteref_t childp;
 	bool table = kvm_pte_table(ctx.old, level);
 
-	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
 		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
+		reload = true;
+	}
 
 	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
 		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+		reload = true;
+	}
+
+	/*
+	 * Reload the page table after invoking the walker callback for leaf
+	 * entries or after pre-order traversal, to allow the walker to descend
+	 * into a newly installed or replaced table.
+	 */
+	if (reload) {
 		ctx.old = READ_ONCE(*ptep);
 		table = kvm_pte_table(ctx.old, level);
 	}
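This is the "walker frees a subtree and keeps walking it" fix from the cover letter: ctx.old was snapshotted before a TABLE_PRE callback ran, so a callback that freed and replaced the child table left the walker descending through the stale snapshot into freed memory. The hunk re-reads the entry whenever a callback was invoked. A self-contained toy of the reload-after-callback pattern, with all types simplified stand-ins rather than the kernel's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;         /* simplified stand-in */

/* A TABLE_PRE-style callback that tears down and replaces the table. */
static int table_pre_cb(pte_t *ptep)
{
        *ptep = 0x2000 | 1;     /* old child table freed, new one installed */
        return 0;
}

static int visit(pte_t *ptep)
{
        pte_t old = *ptep;      /* snapshot taken before the callback */
        int ret = table_pre_cb(ptep);

        /*
         * The fix: re-read the entry after the callback ran. Descending
         * through the stale 'old' here would walk the just-freed table.
         */
        old = *ptep;
        printf("descending via %#" PRIx64 "\n", old);
        return ret;
}

int main(void)
{
        pte_t pte = 0x1000 | 1;
        return visit(&pte);
}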
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_dcgsw(struct kvm_vcpu *vcpu,
+			 struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	if (!kvm_has_mte(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
+	return access_dcsw(vcpu, p, r);
+}
+
 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
 {
 	switch (r->aarch32_map) {
@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
 
 	DBG_BCR_BVR_WCR_WVR_EL1(0),
 	DBG_BCR_BVR_WCR_WVR_EL1(1),
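The new access_dcgsw() is just a feature gate in front of the existing handler: without MTE the trapped access UNDEFs, with MTE it is dismissed exactly like the classic set/way ops. A minimal userspace model of that wrapper pattern, where struct vm and both handlers are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct vm { bool has_mte; };            /* hypothetical */

static bool handle_dcsw(struct vm *vm)
{
        (void)vm;                       /* the classic ops are ignored too */
        return true;
}

static bool handle_dcgsw(struct vm *vm)
{
        if (!vm->has_mte)
                return false;           /* models injecting an UNDEF */

        /* With MTE present, treat the tag variants like the classic ops. */
        return handle_dcsw(vm);
}

int main(void)
{
        struct vm vm = { .has_mte = false };
        printf("DC IGSW trap handled: %d\n", handle_dcgsw(&vm));
        return 0;
}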
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->arch.config_lock);
+		mutex_lock(&vcpu->kvm->slots_lock);
 		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		mutex_unlock(&vcpu->kvm->slots_lock);
 	}
 	return ret;
 }
@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
 
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
- * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+ * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
  * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
  * @kvm: kvm struct pointer
  */
@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
 int kvm_vgic_map_resources(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	gpa_t dist_base;
 	int ret = 0;
 
 	if (likely(vgic_ready(kvm)))
 		return 0;
 
+	mutex_lock(&kvm->slots_lock);
 	mutex_lock(&kvm->arch.config_lock);
 	if (vgic_ready(kvm))
 		goto out;
@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	else
 		ret = vgic_v3_map_resources(kvm);
 
-	if (ret)
+	if (ret) {
 		__kvm_vgic_destroy(kvm);
-	else
-		dist->ready = true;
+		goto out;
+	}
+	dist->ready = true;
+	dist_base = dist->vgic_dist_base;
+	mutex_unlock(&kvm->arch.config_lock);
+
+	ret = vgic_register_dist_iodev(kvm, dist_base,
+				       kvm_vgic_global_state.type);
+	if (ret) {
+		kvm_err("Unable to register VGIC dist MMIO regions\n");
+		kvm_vgic_destroy(kvm);
+	}
+	mutex_unlock(&kvm->slots_lock);
+	return ret;
 
 out:
 	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }
 
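The recurring theme of the vgic hunks is a fixed lock hierarchy: slots_lock is now taken outside config_lock, and config_lock is dropped before any kvm_io_bus registration that itself needs slots_lock, which is exactly what the reworked kvm_vgic_map_resources() above does. A compact pthreads model of that ordering; the names mirror the kernel's locks but this is only a sketch, not kernel code:

#include <pthread.h>
#include <stdio.h>

/* slots_lock is always the outer lock, config_lock the inner one. */
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

static void map_resources(void)
{
        pthread_mutex_lock(&slots_lock);        /* outer */
        pthread_mutex_lock(&config_lock);       /* inner */
        /* ... configure distributor state under both locks ... */
        pthread_mutex_unlock(&config_lock);
        /* ... register MMIO devices with only slots_lock held ... */
        pthread_mutex_unlock(&slots_lock);
}

int main(void)
{
        map_resources();
        puts("done");
        return 0;
}

Taking the locks in the same order on every path is what makes the ordering deadlock-free; the bugs fixed here came from paths that nested them the other way around.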
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
 
 static int vgic_its_create(struct kvm_device *dev, u32 type)
 {
+	int ret;
 	struct vgic_its *its;
 
 	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	if (!its)
 		return -ENOMEM;
 
+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	if (vgic_initialized(dev->kvm)) {
-		int ret = vgic_v4_init(dev->kvm);
+		ret = vgic_v4_init(dev->kvm);
 		if (ret < 0) {
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			kfree(its);
 			return ret;
 		}
@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
 	/* Yep, even more trickery for lock ordering... */
 #ifdef CONFIG_LOCKDEP
-	mutex_lock(&dev->kvm->arch.config_lock);
 	mutex_lock(&its->cmd_lock);
 	mutex_lock(&its->its_lock);
 	mutex_unlock(&its->its_lock);
 	mutex_unlock(&its->cmd_lock);
-	mutex_unlock(&dev->kvm->arch.config_lock);
 #endif
 
 	its->vgic_its_base = VGIC_ADDR_UNDEF;
@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
 	dev->private = its;
 
-	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+
+	mutex_unlock(&dev->kvm->arch.config_lock);
+
+	return ret;
 }
 
 static void vgic_its_destroy(struct kvm_device *kvm_dev)
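The #ifdef CONFIG_LOCKDEP block is a priming trick: taking and releasing the ITS locks once in their intended nesting order teaches lockdep the hierarchy up front, before any real contention can occur. config_lock drops out of that chain here because vgic_its_create() now already holds it at this point, so reacquiring it would self-deadlock. A pthreads sketch of the priming idiom, under the assumption that some external checker records lock order the way lockdep does:

#include <pthread.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t its_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Acquire the locks once in the intended nesting order (its_lock
 * inside cmd_lock) so an order-tracking validator records it.
 */
static void prime_lock_order(void)
{
        pthread_mutex_lock(&cmd_lock);
        pthread_mutex_lock(&its_lock);
        pthread_mutex_unlock(&its_lock);
        pthread_mutex_unlock(&cmd_lock);
}

int main(void)
{
        prime_lock_order();
        return 0;
}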
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
 	if (get_user(addr, uaddr))
 		return -EFAULT;
 
-	mutex_lock(&kvm->arch.config_lock);
+	/*
+	 * Since we can't hold config_lock while registering the redistributor
+	 * iodevs, take the slots_lock immediately.
+	 */
+	mutex_lock(&kvm->slots_lock);
 	switch (attr->attr) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
 	if (r)
 		goto out;
 
+	mutex_lock(&kvm->arch.config_lock);
 	if (write) {
 		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
 		if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
 	} else {
 		addr = *addr_ptr;
 	}
+	mutex_unlock(&kvm->arch.config_lock);
 
 out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);
 
 	if (!r && !write)
 		r = put_user(addr, uaddr);
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
 	struct vgic_redist_region *rdreg;
 	gpa_t rd_base;
-	int ret;
+	int ret = 0;
 
+	lockdep_assert_held(&kvm->slots_lock);
+	mutex_lock(&kvm->arch.config_lock);
+
 	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
-		return 0;
+		goto out_unlock;
 
 	/*
 	 * We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	 */
 	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
 	if (!rdreg)
-		return 0;
+		goto out_unlock;
 
-	if (!vgic_v3_check_base(kvm))
-		return -EINVAL;
+	if (!vgic_v3_check_base(kvm)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	vgic_cpu->rdreg = rdreg;
 	vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
 	rd_dev->redist_vcpu = vcpu;
 
-	mutex_lock(&kvm->slots_lock);
+	mutex_unlock(&kvm->arch.config_lock);
+
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
 				      2 * SZ_64K, &rd_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
 
 	if (ret)
 		return ret;
 
 	/* Protected by slots_lock */
 	rdreg->free_index++;
 	return 0;
+
+out_unlock:
+	mutex_unlock(&kvm->arch.config_lock);
+	return ret;
 }
 
 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 			/* The current c failed, so iterate over the previous ones. */
 			int i;
 
-			mutex_lock(&kvm->slots_lock);
 			for (i = 0; i < c; i++) {
 				vcpu = kvm_get_vcpu(kvm, i);
 				vgic_unregister_redist_iodev(vcpu);
 			}
-			mutex_unlock(&kvm->slots_lock);
 		}
 
 	return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 {
 	int ret;
 
+	mutex_lock(&kvm->arch.config_lock);
 	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
+	mutex_unlock(&kvm->arch.config_lock);
 	if (ret)
 		return ret;
 
@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 	if (ret) {
 		struct vgic_redist_region *rdreg;
 
+		mutex_lock(&kvm->arch.config_lock);
 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
 		vgic_v3_free_redist_region(rdreg);
+		mutex_unlock(&kvm->arch.config_lock);
 		return ret;
 	}
 
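vgic_register_redist_iodev() now funnels every early exit through out_unlock, so config_lock is released on the same path it was taken while the caller keeps holding slots_lock. A stripped-down sketch of that single-exit unlock idiom; register_iodev and its argument are invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch of the goto-based single-exit unlock pattern. */
static int register_iodev(int base_is_set)
{
        int ret = 0;

        pthread_mutex_lock(&config_lock);

        if (!base_is_set)
                goto out_unlock;        /* ret stays 0: nothing to do yet */

        /* ... validate and publish state while the lock is held ... */

out_unlock:
        pthread_mutex_unlock(&config_lock);
        return ret;
}

int main(void)
{
        printf("ret = %d\n", register_iodev(0));
        return 0;
}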
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 			     enum vgic_type type)
 {
 	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
-	int ret = 0;
 	unsigned int len;
 
 	switch (type) {
@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 	io_device->iodev_type = IODEV_DIST;
 	io_device->redist_vcpu = NULL;
 
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
-				      len, &io_device->dev);
-	mutex_unlock(&kvm->slots_lock);
-
-	return ret;
+	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+				       len, &io_device->dev);
 }
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
 		return ret;
 	}
 
-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
-	if (ret) {
-		kvm_err("Unable to register VGIC MMIO regions\n");
-		return ret;
-	}
-
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
-	int ret = 0;
 	unsigned long c;
 
 	kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		return -EBUSY;
 	}
 
-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
-	if (ret) {
-		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-		return ret;
-	}
-
 	if (kvm_vgic_global_state.has_gicv4_1)
 		vgic_v4_configure_vsgis(kvm);
 
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
 	}
 }
 
-/* Must be called with the kvm lock held */
 void vgic_v4_configure_vsgis(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	kvm_arm_halt_guest(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {