Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git

Stephen Rothwell 2022-06-28 08:56:28 +10:00
commit 1c2349ae2d
9 changed files with 369 additions and 139 deletions


@@ -443,18 +443,20 @@ asmlinkage void vmread_error(unsigned long field, bool fault)
noinline void vmwrite_error(unsigned long field, unsigned long value)
{
vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%u\n",
field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}
noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
vmx_insn_failed("kvm: vmclear failed: %p/%llx err=%u\n",
vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
}
noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
vmx_insn_failed("kvm: vmptrld failed: %p/%llx err=%u\n",
vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
}
noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
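
The format-string changes above matter because vmcs_read32() returns a u32. A minimal user-space sketch of why %u is the matching conversion (hypothetical values, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t err = 0x80000001u;   /* hypothetical error value */

        printf("err=%d\n", (int)err); /* high bit misread: -2147483647 */
        printf("err=%u\n", err);      /* printed as-is:    2147483649 */
        return 0;
    }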


@@ -3239,10 +3239,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* only 0 or all 1s can be written to IA32_MCi_CTL
* some Linux kernels though clear bit 10 in bank 4 to
* workaround a BIOS/GART TBL issue on AMD K8s, ignore
* this to avoid an uncaught #GP in the guest
* this to avoid an uncaught #GP in the guest.
*
* UNIXWARE clears bit 0 of MC1_CTL to ignore
* correctable, single-bit ECC data errors.
*/
if ((offset & 0x3) == 0 &&
data != 0 && (data | (1 << 10)) != ~(u64)0)
data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
return -1;
/* MCi_STATUS */
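
Pulled out as a standalone predicate, the write-validity rule the comment above describes reads as follows (an editorial paraphrase of the check, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    /* A write to IA32_MCi_CTL is accepted if it is 0, or if it is all 1s
     * once bit 10 (Linux K8 GART workaround) and bit 0 (UNIXWARE
     * single-bit ECC masking) are ignored. */
    static bool mci_ctl_write_ok(uint64_t data)
    {
        return data == 0 || (data | (1ULL << 10) | 1) == ~(uint64_t)0;
    }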


@@ -19,6 +19,7 @@ struct kvm_memslots;
enum kvm_mr_change;
#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>
@@ -69,6 +70,7 @@ struct gfn_to_pfn_cache {
struct kvm_vcpu *vcpu;
struct list_head list;
rwlock_t lock;
struct mutex refresh_lock;
void *khva;
kvm_pfn_t pfn;
enum pfn_cache_usage usage;
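
As a reading aid, the division of labor between the two locks, as interpreted from the pfncache.c hunks later in this commit:

    /*
     * refresh_lock (mutex): serializes kvm_gfn_to_pfn_cache_refresh() and
     * _unmap() callers against each other, and is held across the sleeping
     * hva_to_pfn()/kmap()/memremap() work.
     * lock (rwlock): guards the cached gpa/uhva/pfn/khva fields so they
     * can be checked and used from contexts that must not sleep.
     */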


@@ -14,6 +14,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
enum mop_target {
LOGICAL,
@@ -691,34 +692,92 @@ static void test_errors(void)
kvm_vm_free(t.kvm_vm);
}
struct testdef {
const char *name;
void (*test)(void);
int extension;
} testlist[] = {
{
.name = "simple copy",
.test = test_copy,
},
{
.name = "generic error checks",
.test = test_errors,
},
{
.name = "copy with storage keys",
.test = test_copy_key,
.extension = 1,
},
{
.name = "copy with key storage protection override",
.test = test_copy_key_storage_prot_override,
.extension = 1,
},
{
.name = "copy with key fetch protection",
.test = test_copy_key_fetch_prot,
.extension = 1,
},
{
.name = "copy with key fetch protection override",
.test = test_copy_key_fetch_prot_override,
.extension = 1,
},
{
.name = "error checks with key",
.test = test_errors_key,
.extension = 1,
},
{
.name = "termination",
.test = test_termination,
.extension = 1,
},
{
.name = "error checks with key storage protection override",
.test = test_errors_key_storage_prot_override,
.extension = 1,
},
{
.name = "error checks without key fetch prot override",
.test = test_errors_key_fetch_prot_override_not_enabled,
.extension = 1,
},
{
.name = "error checks with key fetch prot override",
.test = test_errors_key_fetch_prot_override_enabled,
.extension = 1,
},
};
int main(int argc, char *argv[])
{
int memop_cap, extension_cap;
int memop_cap, extension_cap, idx;
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
ksft_print_header();
memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
if (!memop_cap) {
print_skip("CAP_S390_MEM_OP not supported");
exit(KSFT_SKIP);
ksft_exit_skip("CAP_S390_MEM_OP not supported.\n");
}
test_copy();
if (extension_cap > 0) {
test_copy_key();
test_copy_key_storage_prot_override();
test_copy_key_fetch_prot();
test_copy_key_fetch_prot_override();
test_errors_key();
test_termination();
test_errors_key_storage_prot_override();
test_errors_key_fetch_prot_override_not_enabled();
test_errors_key_fetch_prot_override_enabled();
} else {
print_skip("storage key memop extension not supported");
}
test_errors();
ksft_set_plan(ARRAY_SIZE(testlist));
return 0;
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (extension_cap >= testlist[idx].extension) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
} else {
ksft_test_result_skip("%s - extension level %d not supported\n",
testlist[idx].name,
testlist[idx].extension);
}
}
ksft_finished(); /* Print results and exit() accordingly */
}
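
With this table-driven conversion (the resets and sync_regs tests below follow the same pattern), a run on a host with the memop extension capability would emit TAP output along these lines (illustrative and abbreviated):

    TAP version 13
    1..11
    ok 1 simple copy
    ok 2 generic error checks
    ok 3 copy with storage keys
    ...
    ok 11 error checks with key fetch prot override
    # Totals: pass:11 fail:0 xfail:0 xpass:0 skip:0 error:0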


@@ -12,6 +12,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#define VCPU_ID 3
#define LOCAL_IRQS 32
@@ -202,7 +203,7 @@ static void inject_irq(int cpu_id)
static void test_normal(void)
{
pr_info("Testing normal reset\n");
ksft_print_msg("Testing normal reset\n");
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
@@ -225,7 +226,7 @@ static void test_normal(void)
static void test_initial(void)
{
pr_info("Testing initial reset\n");
ksft_print_msg("Testing initial reset\n");
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
sync_regs = &run->s.regs;
@@ -247,7 +248,7 @@ static void test_initial(void)
static void test_clear(void)
{
pr_info("Testing clear reset\n");
ksft_print_msg("Testing clear reset\n");
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
sync_regs = &run->s.regs;
@@ -266,14 +267,35 @@ static void test_clear(void)
kvm_vm_free(vm);
}
struct testdef {
const char *name;
void (*test)(void);
bool needs_cap;
} testlist[] = {
{ "initial", test_initial, false },
{ "normal", test_normal, true },
{ "clear", test_clear, true },
};
int main(int argc, char *argv[])
{
bool has_s390_vcpu_resets = kvm_check_cap(KVM_CAP_S390_VCPU_RESETS);
int idx;
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
test_initial();
if (kvm_check_cap(KVM_CAP_S390_VCPU_RESETS)) {
test_normal();
test_clear();
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (!testlist[idx].needs_cap || has_s390_vcpu_resets) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
} else {
ksft_test_result_skip("%s - no VCPU_RESETS capability\n",
testlist[idx].name);
}
}
return 0;
ksft_finished(); /* Print results and exit() accordingly */
}


@@ -21,6 +21,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "diag318_test_handler.h"
#include "kselftest.h"
#define VCPU_ID 5
@@ -74,27 +75,9 @@ static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)
#define TEST_SYNC_FIELDS (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS|KVM_SYNC_DIAG318)
#define INVALID_SYNC_FIELD 0x80000000
int main(int argc, char *argv[])
void test_read_invalid(struct kvm_vm *vm, struct kvm_run *run)
{
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_regs regs;
struct kvm_sregs sregs;
int rv, cap;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
if (!cap) {
print_skip("CAP_SYNC_REGS not supported");
exit(KSFT_SKIP);
}
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
int rv;
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
@@ -110,6 +93,11 @@ int main(int argc, char *argv[])
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
}
void test_set_invalid(struct kvm_vm *vm, struct kvm_run *run)
{
int rv;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
@@ -125,6 +113,13 @@ int main(int argc, char *argv[])
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
}
void test_req_and_verify_all_valid_regs(struct kvm_vm *vm, struct kvm_run *run)
{
struct kvm_sregs sregs;
struct kvm_regs regs;
int rv;
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
@@ -146,6 +141,13 @@ int main(int argc, char *argv[])
vcpu_sregs_get(vm, VCPU_ID, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
void test_set_and_verify_various_reg_values(struct kvm_vm *vm, struct kvm_run *run)
{
struct kvm_sregs sregs;
struct kvm_regs regs;
int rv;
/* Set and verify various register values */
run->s.regs.gprs[11] = 0xBAD1DEA;
@@ -180,6 +182,11 @@ int main(int argc, char *argv[])
vcpu_sregs_get(vm, VCPU_ID, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
void test_clear_kvm_dirty_regs_bits(struct kvm_vm *vm, struct kvm_run *run)
{
int rv;
/* Clear kvm_dirty_regs bits, verify new s.regs values are
* overwritten with existing guest values.
@@ -200,8 +207,46 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.diag318 != 0x4B1D,
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
}
struct testdef {
const char *name;
void (*test)(struct kvm_vm *vm, struct kvm_run *run);
} testlist[] = {
{ "read invalid", test_read_invalid },
{ "set invalid", test_set_invalid },
{ "request+verify all valid regs", test_req_and_verify_all_valid_regs },
{ "set+verify various regs", test_set_and_verify_various_reg_values },
{ "clear kvm_dirty_regs bits", test_clear_kvm_dirty_regs_bits },
};
int main(int argc, char *argv[])
{
static struct kvm_run *run;
static struct kvm_vm *vm;
int idx;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
ksft_print_header();
if (!kvm_check_cap(KVM_CAP_SYNC_REGS))
ksft_exit_skip("CAP_SYNC_REGS not supported");
ksft_set_plan(ARRAY_SIZE(testlist));
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
testlist[idx].test(vm, run);
ksft_test_result_pass("%s\n", testlist[idx].name);
}
kvm_vm_free(vm);
return 0;
ksft_finished(); /* Print results and exit() accordingly */
}


@@ -8,6 +8,7 @@
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
@@ -63,12 +64,12 @@ static enum permission test_protection(void *addr, uint8_t key)
}
enum stage {
STAGE_END,
STAGE_INIT_SIMPLE,
TEST_SIMPLE,
STAGE_INIT_FETCH_PROT_OVERRIDE,
TEST_FETCH_PROT_OVERRIDE,
TEST_STORAGE_PROT_OVERRIDE,
STAGE_END /* must be the last entry (it's the number of tests) */
};
struct test {
@@ -182,7 +183,7 @@ static void guest_code(void)
GUEST_SYNC(perform_next_stage(&i, mapped_0));
}
#define HOST_SYNC(vmp, stage) \
#define HOST_SYNC_NO_TAP(vmp, stage) \
({ \
struct kvm_vm *__vm = (vmp); \
struct ucall uc; \
@@ -198,12 +199,21 @@ static void guest_code(void)
ASSERT_EQ(uc.args[1], __stage); \
})
#define HOST_SYNC(vmp, stage) \
({ \
HOST_SYNC_NO_TAP(vmp, stage); \
ksft_test_result_pass("" #stage "\n"); \
})
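
The "" #stage trick above is what turns the enum constant into the TAP test name: #stage stringizes the macro argument, and the adjacent empty literal concatenates with it. A minimal illustration (NAME_OF is a hypothetical helper, not part of the test):

    /* NAME_OF(TEST_SIMPLE) expands to "" "TEST_SIMPLE", which the
     * compiler concatenates into the single literal "TEST_SIMPLE". */
    #define NAME_OF(x) "" #x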
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_run *run;
vm_vaddr_t guest_0_page;
ksft_print_header();
ksft_set_plan(STAGE_END);
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
@@ -212,9 +222,14 @@ int main(int argc, char *argv[])
HOST_SYNC(vm, TEST_SIMPLE);
guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
if (guest_0_page != 0)
print_skip("Did not allocate page at 0 for fetch protection override tests");
HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
if (guest_0_page != 0) {
/* Use NO_TAP so we don't get a PASS print */
HOST_SYNC_NO_TAP(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
"Did not allocate page at 0\n");
} else {
HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
}
if (guest_0_page == 0)
mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
@@ -224,4 +239,8 @@ int main(int argc, char *argv[])
run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(vm, TEST_STORAGE_PROT_OVERRIDE);
kvm_vm_free(vm);
ksft_finished(); /* Print results and exit() accordingly */
}


@@ -724,6 +724,15 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
kvm->mn_active_invalidate_count++;
spin_unlock(&kvm->mn_invalidate_lock);
/*
* Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
* before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
* each cache's lock. There are relatively few caches in existence at
* any given time, and the caches themselves can check for hva overlap,
* i.e. don't need to rely on memslot overlap checks for performance.
* Because this runs without holding mmu_lock, the pfn caches must use
* mn_active_invalidate_count (see above) instead of mmu_notifier_count.
*/
gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
hva_range.may_block);


@@ -95,48 +95,143 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
/* Unmap the old page if it was mapped before, and release it */
if (!is_error_noslot_pfn(pfn)) {
if (khva) {
if (pfn_valid(pfn))
kunmap(pfn_to_page(pfn));
/* Unmap the old pfn/page if it was mapped before. */
if (!is_error_noslot_pfn(pfn) && khva) {
if (pfn_valid(pfn))
kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
else
memunmap(khva);
else
memunmap(khva);
#endif
}
kvm_release_pfn(pfn, false);
}
}
static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
/*
* mn_active_invalidate_count acts for all intents and purposes
* like mmu_notifier_count here; but the latter cannot be used
* here because the invalidation of caches in the mmu_notifier
* event occurs _before_ mmu_notifier_count is elevated.
*
* Note, it does not matter that mn_active_invalidate_count
* is not protected by gpc->lock. It is guaranteed to
* be elevated before the mmu_notifier acquires gpc->lock, and
* isn't dropped until after mmu_notifier_seq is updated.
*/
if (kvm->mn_active_invalidate_count)
return true;
/*
* Ensure mn_active_invalidate_count is read before
* mmu_notifier_seq. This pairs with the smp_wmb() in
* mmu_notifier_invalidate_range_end() to guarantee either the
* old (non-zero) value of mn_active_invalidate_count or the
* new (incremented) value of mmu_notifier_seq is observed.
*/
smp_rmb();
return kvm->mmu_notifier_seq != mmu_seq;
}
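
The pairing the comment refers to is the usual publish-then-release pattern; a hedged sketch of the writer side (a paraphrase of the invalidate_range_end path, not the exact upstream code):

    /* Writer, at the end of an invalidation: publish the new sequence
     * number before dropping the in-progress count. */
    kvm->mmu_notifier_seq++;
    smp_wmb();                          /* pairs with smp_rmb() above */
    kvm->mn_active_invalidate_count--;

With that ordering, the reader's smp_rmb() guarantees it observes either the still-elevated count or the incremented sequence number, so a concurrent invalidation can never go unnoticed.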
static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
void *old_khva = gpc->khva - offset_in_page(gpc->khva);
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
void *new_khva = NULL;
unsigned long mmu_seq;
kvm_pfn_t new_pfn;
int retry;
lockdep_assert_held(&gpc->refresh_lock);
lockdep_assert_held_write(&gpc->lock);
/*
* Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
* assets have already been updated and so a concurrent check() from a
* different task may not fail the gpa/uhva/generation checks.
*/
gpc->valid = false;
do {
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
write_unlock_irq(&gpc->lock);
/*
* If the previous iteration "failed" due to an mmu_notifier
* event, release the pfn and unmap the kernel virtual address
* from the previous attempt. Unmapping might sleep, so this
* needs to be done after dropping the lock. Opportunistically
* check for resched while the lock isn't held.
*/
if (new_pfn != KVM_PFN_ERR_FAULT) {
/*
* Keep the mapping if the previous iteration reused
* the existing mapping and didn't create a new one.
*/
if (new_khva != old_khva)
gpc_unmap_khva(kvm, new_pfn, new_khva);
kvm_release_pfn_clean(new_pfn);
cond_resched();
}
/* We always request a writeable mapping */
new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL);
new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
if (is_error_noslot_pfn(new_pfn))
break;
goto out_error;
KVM_MMU_READ_LOCK(kvm);
retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva);
KVM_MMU_READ_UNLOCK(kvm);
if (!retry)
break;
/*
* Obtain a new kernel mapping if KVM itself will access the
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
if (gpc->usage & KVM_HOST_USES_PFN) {
if (new_pfn == gpc->pfn) {
new_khva = old_khva;
} else if (pfn_valid(new_pfn)) {
new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
} else {
new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
if (!new_khva) {
kvm_release_pfn_clean(new_pfn);
goto out_error;
}
}
cond_resched();
} while (1);
write_lock_irq(&gpc->lock);
return new_pfn;
/*
* Other tasks must wait for _this_ refresh to complete before
* attempting to refresh.
*/
WARN_ON_ONCE(gpc->valid);
} while (mmu_notifier_retry_cache(kvm, mmu_seq));
gpc->valid = true;
gpc->pfn = new_pfn;
gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
/*
* Put the reference to the _new_ pfn. The pfn is now tracked by the
* cache and can be safely migrated, swapped, etc... as the cache will
* invalidate any mappings in response to relevant mmu_notifier events.
*/
kvm_release_pfn_clean(new_pfn);
return 0;
out_error:
write_lock_irq(&gpc->lock);
return -EFAULT;
}
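
Condensed, the retry protocol implemented above (an editorial outline, not text from the patch):

    /*
     * 1. Mark the cache invalid and snapshot mmu_notifier_seq.
     * 2. Drop gpc->lock; release any pfn/mapping left over from a failed
     *    previous pass (unmapping may sleep, hence outside the lock).
     * 3. Resolve the uhva to a pfn with hva_to_pfn(); if KVM itself will
     *    access the pfn, map it with kmap()/memremap() (both may sleep).
     * 4. Retake gpc->lock and, if an invalidation ran in the meantime
     *    (mmu_notifier_retry_cache()), start over from step 1.
     * 5. Publish pfn/khva, set gpc->valid, and drop the extra pfn
     *    reference; mmu_notifier events keep the cache honest from here.
     */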
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
@@ -146,9 +241,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
unsigned long page_offset = gpa & ~PAGE_MASK;
kvm_pfn_t old_pfn, new_pfn;
unsigned long old_uhva;
gpa_t old_gpa;
void *old_khva;
bool old_valid;
int ret = 0;
/*
@@ -158,13 +251,18 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
if (page_offset + len > PAGE_SIZE)
return -EINVAL;
/*
* If another task is refreshing the cache, wait for it to complete.
* There is no guarantee that concurrent refreshes will see the same
* gpa, memslots generation, etc..., so they must be fully serialized.
*/
mutex_lock(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
old_gpa = gpc->gpa;
old_pfn = gpc->pfn;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
old_valid = gpc->valid;
/* If the userspace HVA is invalid, refresh that first */
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
@@ -177,64 +275,17 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
if (kvm_is_error_hva(gpc->uhva)) {
gpc->pfn = KVM_PFN_ERR_FAULT;
ret = -EFAULT;
goto out;
}
gpc->uhva += page_offset;
}
/*
* If the userspace HVA changed or the PFN was already invalid,
* drop the lock and do the HVA to PFN lookup again.
*/
if (!old_valid || old_uhva != gpc->uhva) {
unsigned long uhva = gpc->uhva;
void *new_khva = NULL;
/* Placeholders for "hva is valid but not yet mapped" */
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->khva = NULL;
gpc->valid = true;
write_unlock_irq(&gpc->lock);
new_pfn = hva_to_pfn_retry(kvm, uhva);
if (is_error_noslot_pfn(new_pfn)) {
ret = -EFAULT;
goto map_done;
}
if (gpc->usage & KVM_HOST_USES_PFN) {
if (new_pfn == old_pfn) {
new_khva = old_khva;
old_pfn = KVM_PFN_ERR_FAULT;
old_khva = NULL;
} else if (pfn_valid(new_pfn)) {
new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
} else {
new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
if (new_khva)
new_khva += page_offset;
else
ret = -EFAULT;
}
map_done:
write_lock_irq(&gpc->lock);
if (ret) {
gpc->valid = false;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->khva = NULL;
} else {
/* At this point, gpc->valid may already have been cleared */
gpc->pfn = new_pfn;
gpc->khva = new_khva;
}
if (!gpc->valid || old_uhva != gpc->uhva) {
ret = hva_to_pfn_retry(kvm, gpc);
} else {
/* If the HVA→PFN mapping was already valid, don't unmap it. */
old_pfn = KVM_PFN_ERR_FAULT;
@@ -242,9 +293,26 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
}
out:
/*
* Invalidate the cache and purge the pfn/khva if the refresh failed.
* Some/all of the uhva, gpa, and memslot generation info may still be
* valid, leave it as is.
*/
if (ret) {
gpc->valid = false;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->khva = NULL;
}
/* Snapshot the new pfn before dropping the lock! */
new_pfn = gpc->pfn;
write_unlock_irq(&gpc->lock);
__release_gpc(kvm, old_pfn, old_khva, old_gpa);
mutex_unlock(&gpc->refresh_lock);
if (old_pfn != new_pfn)
gpc_unmap_khva(kvm, old_pfn, old_khva);
return ret;
}
@@ -254,14 +322,13 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
void *old_khva;
kvm_pfn_t old_pfn;
gpa_t old_gpa;
mutex_lock(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
gpc->valid = false;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_gpa = gpc->gpa;
old_pfn = gpc->pfn;
/*
@@ -272,8 +339,9 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
gpc->pfn = KVM_PFN_ERR_FAULT;
write_unlock_irq(&gpc->lock);
mutex_unlock(&gpc->refresh_lock);
__release_gpc(kvm, old_pfn, old_khva, old_gpa);
gpc_unmap_khva(kvm, old_pfn, old_khva);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
@@ -286,6 +354,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
if (!gpc->active) {
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->khva = NULL;
gpc->pfn = KVM_PFN_ERR_FAULT;