LoongArch: KVM: Mark page accessed and dirty with page ref added

Function kvm_map_page_fast() is the fast path of the secondary MMU page
fault flow: the pfn is obtained from the secondary MMU page table walker.
However, no reference is taken on the corresponding page, so it is
dangerous to access the page outside of mmu_lock.
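
The race window is visible in the lines removed below: the pfn is used
after mmu_lock is dropped, with nothing pinning the page, so a concurrent
unmap can free it first. Excerpt from the old code (annotation added):

  spin_unlock(&kvm->mmu_lock);

  /*
   * Fixme: pfn may be freed after mmu_lock
   * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
   */
  if (kvm_pte_young(changed))
          kvm_set_pfn_accessed(pfn);      /* page may already be freed */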

Here the page reference is taken inside mmu_lock, and kvm_set_pfn_accessed()
and kvm_set_pfn_dirty() are called with that reference held, so that the
page cannot be freed by others in the meantime.
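
As a standalone illustration of the take-a-reference-under-the-lock
pattern, a minimal user-space C sketch (obj, obj_get()/obj_put() and
table_lock are hypothetical stand-ins, not kernel APIs):

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdlib.h>

  struct obj {
          atomic_int refcount;    /* stands in for the page refcount */
          int accessed, dirty;
  };

  static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

  static void obj_get(struct obj *o)
  {
          atomic_fetch_add(&o->refcount, 1);
  }

  static void obj_put(struct obj *o)
  {
          /* Drop one reference; the last put frees the object. */
          if (atomic_fetch_sub(&o->refcount, 1) == 1)
                  free(o);
  }

  static void fault_fast_path(struct obj *o)
  {
          pthread_mutex_lock(&table_lock);
          /* ... walk and update the page table ... */
          obj_get(o);             /* pin while the lock still protects it */
          pthread_mutex_unlock(&table_lock);

          /* Safe: our reference keeps the object alive after unlock. */
          o->accessed = 1;
          o->dirty = 1;

          obj_put(o);             /* drop the pin */
  }

  int main(void)
  {
          struct obj *o = calloc(1, sizeof(*o));

          obj_get(o);             /* creation reference */
          fault_fast_path(o);
          obj_put(o);             /* last reference: frees o */
          return 0;
  }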

Also remove the explicit kvm_set_pfn_accessed() in kvm_map_page(), since
the following kvm_release_pfn_clean() already marks the page accessed.
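
For context, a simplified sketch of why that call is redundant, paraphrased
from the generic KVM release path of this era (error and non-refcounted-pfn
checks elided; see virt/kvm/kvm_main.c for the real code):

  void kvm_release_pfn_clean(kvm_pfn_t pfn)
  {
          struct page *page = kvm_pfn_to_refcounted_page(pfn);

          if (page)
                  kvm_release_page_clean(page);
  }

  void kvm_release_page_clean(struct page *page)
  {
          kvm_set_page_accessed(page);    /* already marks it accessed */
          put_page(page);
  }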

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---

diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -557,6 +557,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *slot;
+	struct page *page;
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -599,19 +600,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	if (changed) {
 		kvm_set_pte(ptep, new);
 		pfn = kvm_pte_pfn(new);
+		page = kvm_pfn_to_refcounted_page(pfn);
+		if (page)
+			get_page(page);
 	}
 	spin_unlock(&kvm->mmu_lock);
 
-	/*
-	 * Fixme: pfn may be freed after mmu_lock
-	 * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
-	 */
-	if (kvm_pte_young(changed))
-		kvm_set_pfn_accessed(pfn);
+	if (changed) {
+		if (kvm_pte_young(changed))
+			kvm_set_pfn_accessed(pfn);
 
-	if (kvm_pte_dirty(changed)) {
-		mark_page_dirty(kvm, gfn);
-		kvm_set_pfn_dirty(pfn);
+		if (kvm_pte_dirty(changed)) {
+			mark_page_dirty(kvm, gfn);
+			kvm_set_pfn_dirty(pfn);
+		}
+		if (page)
+			put_page(page);
 	}
 	return ret;
 out:
@@ -920,7 +924,6 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 		kvm_set_pfn_dirty(pfn);
 	}
 
-	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);