mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 18:55:12 +00:00
KVM: x86/mmu: count KVM mmu usage in secondary pagetable stats.
Count the pages used by KVM mmu on x86 in memory stats under secondary pagetable stats (e.g. "SecPageTables" in /proc/meminfo) to give better visibility into the memory consumption of KVM mmu in a similar way to how normal user page tables are accounted. Add the inner helper in common KVM, ARM will also use it to count stats in a future commit. Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Reviewed-by: Sean Christopherson <seanjc@google.com> Acked-by: Marc Zyngier <maz@kernel.org> # generic KVM changes Link: https://lore.kernel.org/r/20220823004639.2387269-3-yosryahmed@google.com Link: https://lore.kernel.org/r/20220823004639.2387269-4-yosryahmed@google.com [sean: squash x86 usage to workaround modpost issues] Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent
ebc97a52b5
commit
43a063cab3
@ -1665,6 +1665,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
|
||||
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
|
||||
}
|
||||
|
||||
/*
 * Account a legacy/shadow MMU page: bump the per-VM count of in-use MMU
 * pages and charge the backing page table page to the secondary
 * pagetable memory stats (e.g. "SecPageTables" in /proc/meminfo).
 */
static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_mod_used_mmu_pages(kvm, +1);
	kvm_account_pgtable_pages((void *)sp->spt, +1);
}
|
||||
|
||||
/*
 * Unaccount a legacy/shadow MMU page: exact inverse of
 * kvm_account_mmu_page() — drop the per-VM in-use page count and
 * uncharge the page table page from the secondary pagetable stats.
 */
static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_mod_used_mmu_pages(kvm, -1);
	kvm_account_pgtable_pages((void *)sp->spt, -1);
}
|
||||
|
||||
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
|
||||
{
|
||||
MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
|
||||
@ -2122,7 +2134,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
|
||||
*/
|
||||
sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
|
||||
list_add(&sp->link, &kvm->arch.active_mmu_pages);
|
||||
kvm_mod_used_mmu_pages(kvm, +1);
|
||||
kvm_account_mmu_page(kvm, sp);
|
||||
|
||||
sp->gfn = gfn;
|
||||
sp->role = role;
|
||||
@ -2456,7 +2468,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
|
||||
list_add(&sp->link, invalid_list);
|
||||
else
|
||||
list_move(&sp->link, invalid_list);
|
||||
kvm_mod_used_mmu_pages(kvm, -1);
|
||||
kvm_unaccount_mmu_page(kvm, sp);
|
||||
} else {
|
||||
/*
|
||||
* Remove the active root from the active page list, the root
|
||||
|
@ -372,6 +372,16 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Account a TDP MMU shadow page in the secondary pagetable memory stats.
 * Note: unlike the shadow MMU's kvm_account_mmu_page(), this does not
 * call kvm_mod_used_mmu_pages() — only the memcg/vmstat charge is made.
 */
static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, +1);
}
|
||||
|
||||
/*
 * Uncharge a TDP MMU shadow page from the secondary pagetable memory
 * stats; inverse of tdp_account_mmu_page().
 */
static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
}
|
||||
|
||||
/**
|
||||
* tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
|
||||
*
|
||||
@ -384,6 +394,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
|
||||
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
|
||||
bool shared)
|
||||
{
|
||||
tdp_unaccount_mmu_page(kvm, sp);
|
||||
if (shared)
|
||||
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
|
||||
else
|
||||
@ -1132,6 +1143,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
|
||||
if (account_nx)
|
||||
account_huge_nx_page(kvm, sp);
|
||||
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
|
||||
tdp_account_mmu_page(kvm, sp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2247,6 +2247,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
|
||||
|
||||
/*
 * (Un)charge @nr pages to the NR_SECONDARY_PAGETABLE memory statistic,
 * which surfaces as "SecPageTables" in /proc/meminfo.
 *
 * If more than one page is being (un)accounted, @virt must be the address of
 * the first page of a block of pages that were allocated together (i.e.
 * accounted together).
 *
 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
 * is thread-safe.
 */
static inline void kvm_account_pgtable_pages(void *virt, int nr)
{
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}
|
||||
|
||||
/*
|
||||
* This defines how many reserved entries we want to keep before we
|
||||
* kick the vcpu to the userspace to avoid dirty ring full. This
|
||||
|
Loading…
Reference in New Issue
Block a user