Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge branch 'topic/ppc-kvm' into next
Merge one more commit from the topic branch we shared with the kvm-ppc tree. This brings in a fix to the code that scans for dirty pages during migration of a VM, which was incorrectly triggering a warning.
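For context: the warning in question comes from the locked page-table-walk helper, which complains when it is called without kvm->mmu_lock held, while the dirty-page scan deliberately walks the partition-scoped table without the lock for performance. A minimal userspace sketch of that failure shape follows; every name in it (table_lock, table_lock_held, find_entry) is made up for illustration and none of it is kernel code.

/*
 * Illustrative only: a walker helper that warns (like a VM_WARN-style
 * check) when its lock is not held, and a scan path that skips the lock
 * for speed and therefore trips the warning.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool table_lock_held;              /* stand-in for a lockdep-style check */
static unsigned long table_entry = 0x1;   /* stand-in for a pte */

static unsigned long *find_entry(void)
{
        if (!table_lock_held)
                fprintf(stderr, "warning: find_entry() called without table_lock\n");
        return &table_entry;
}

int main(void)
{
        /* Correct caller: takes the lock, no warning. */
        pthread_mutex_lock(&table_lock);
        table_lock_held = true;
        printf("locked scan sees %#lx\n", *find_entry());
        table_lock_held = false;
        pthread_mutex_unlock(&table_lock);

        /* Dirty-scan analogue: skips the lock for speed, so the warning
         * fires; the merged fix instead adds an explicitly "unlocked"
         * walker plus a recheck under the lock. */
        printf("lockless scan sees %#lx\n", *find_entry());
        return 0;
}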
commit 1395375c59
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -635,6 +635,16 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                 unsigned long gpa, unsigned long hpa,
                                 unsigned long nbytes);
 
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+                                unsigned *hshift)
+{
+        pte_t *pte;
+
+        pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+        return pte;
+}
+
 static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
                                             unsigned *hshift)
 {
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1040,7 +1040,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 {
         unsigned long gfn = memslot->base_gfn + pagenum;
         unsigned long gpa = gfn << PAGE_SHIFT;
-        pte_t *ptep;
+        pte_t *ptep, pte;
         unsigned int shift;
         int ret = 0;
         unsigned long old, *rmapp;
@@ -1048,12 +1048,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
         if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                 return ret;
 
-        ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
-        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
-                ret = 1;
-                if (shift)
-                        ret = 1 << (shift - PAGE_SHIFT);
+        /*
+         * For performance reasons we don't hold kvm->mmu_lock while walking the
+         * partition scoped table.
+         */
+        ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+        if (!ptep)
+                return 0;
+
+        pte = READ_ONCE(*ptep);
+        if (pte_present(pte) && pte_dirty(pte)) {
+                spin_lock(&kvm->mmu_lock);
+                /*
+                 * Recheck the pte again
+                 */
+                if (pte_val(pte) != pte_val(*ptep)) {
+                        /*
+                         * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+                         * only find PAGE_SIZE pte entries here. We can continue
+                         * to use the pte addr returned by above page table
+                         * walk.
+                         */
+                        if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+                                spin_unlock(&kvm->mmu_lock);
+                                return 0;
+                        }
+                }
+
+                ret = 1;
+                VM_BUG_ON(shift);
                 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                               gpa, shift);
                 kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);