KVM: x86/mmu: Use page-track notifiers iff there are external users

Disable the page-track notifier code at compile time if there are no
external users, i.e. if CONFIG_KVM_EXTERNAL_WRITE_TRACKING=n.  KVM itself
now hooks emulated writes directly instead of relying on the page-track
mechanism.
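
The split follows the usual kernel pattern: the real declarations are visible
under CONFIG_KVM_EXTERNAL_WRITE_TRACKING, and static inline stubs compile away
otherwise.  A condensed sketch of the page_track.h change below (not the
verbatim diff):

  #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
  int kvm_page_track_init(struct kvm *kvm);
  void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
  #else
  /* No external users: stubs keep callers free of #ifdefs. */
  static inline int kvm_page_track_init(struct kvm *kvm) { return 0; }
  static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,
                                            const u8 *new, int bytes) { }
  #endif

Either way, kvm_page_track_write() still calls kvm_mmu_track_write(), so KVM's
own handling of emulated writes no longer depends on the notifier machinery.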

Provide a stub for "struct kvm_page_track_notifier_node" so that including
headers directly from the command line, e.g. for testing include guards,
doesn't fail due to a struct having an incomplete type.
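
For example (hypothetical user, purely to illustrate the stub), a structure
that embeds a notifier node still compiles with
CONFIG_KVM_EXTERNAL_WRITE_TRACKING=n because the node collapses to an empty
struct:

  struct example_track_user {
          struct kvm_page_track_notifier_node track_node; /* empty stub when =n */
  };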

Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-23-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson <seanjc@google.com>, 2023-07-28 18:35:28 -07:00
Committer: Paolo Bonzini <pbonzini@redhat.com>
commit e998fb1a30 (parent 58ea7cf700)
4 changed files with 47 additions and 16 deletions

arch/x86/include/asm/kvm_host.h

@@ -1265,7 +1265,9 @@ struct kvm_arch {
* create an NX huge page (without hanging the guest).
*/
struct list_head possible_nx_huge_pages;
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
struct kvm_page_track_notifier_head track_notifier_head;
#endif
/*
* Protects marking pages unsync during page faults, as TDP MMU page
* faults only take mmu_lock for read. For simplicity, the unsync

arch/x86/include/asm/kvm_page_track.h

@@ -9,6 +9,14 @@ enum kvm_page_track_mode {
KVM_PAGE_TRACK_MAX,
};
void kvm_slot_page_track_add_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
void kvm_slot_page_track_remove_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
/*
* The notifier represented by @kvm_page_track_notifier_node is linked into
* the head which will be notified when guest is triggering the track event.
@@ -48,18 +56,18 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node);
};
void kvm_slot_page_track_add_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
void kvm_slot_page_track_remove_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
void
kvm_page_track_register_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
#else
/*
* Allow defining a node in a structure even if page tracking is disabled, e.g.
* to play nice with testing headers via direct inclusion from the command line.
*/
struct kvm_page_track_notifier_node {};
#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
#endif

arch/x86/kvm/mmu/page_track.c

@@ -194,6 +194,7 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm,
return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
void kvm_page_track_cleanup(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
@@ -255,14 +256,13 @@ EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
* The node should figure out if the written page is the one that node is
* interested in by itself.
*/
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes)
void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes)
{
struct kvm_page_track_notifier_head *head;
struct kvm_page_track_notifier_node *n;
int idx;
head = &vcpu->kvm->arch.track_notifier_head;
head = &kvm->arch.track_notifier_head;
if (hlist_empty(&head->track_notifier_list))
return;
@@ -273,8 +273,6 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
if (n->track_write)
n->track_write(gpa, new, bytes, n);
srcu_read_unlock(&head->track_srcu, idx);
kvm_mmu_track_write(vcpu, gpa, new, bytes);
}
/*
@@ -299,3 +297,5 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
n->track_remove_region(slot->base_gfn, slot->npages, n);
srcu_read_unlock(&head->track_srcu, idx);
}
#endif

arch/x86/kvm/mmu/page_track.h

@@ -6,8 +6,6 @@
#include <asm/kvm_page_track.h>
int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot);
@@ -21,13 +19,36 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm,
const struct kvm_memory_slot *slot,
gfn_t gfn, enum kvm_page_track_mode mode);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);
void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
static inline bool kvm_page_track_has_external_user(struct kvm *kvm)
{
return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list);
}
#else
static inline int kvm_page_track_init(struct kvm *kvm) { return 0; }
static inline void kvm_page_track_cleanup(struct kvm *kvm) { }
static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,
const u8 *new, int bytes) { }
static inline void kvm_page_track_delete_slot(struct kvm *kvm,
struct kvm_memory_slot *slot) { }
static inline bool kvm_page_track_has_external_user(struct kvm *kvm) { return false; }
#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
{
__kvm_page_track_write(vcpu->kvm, gpa, new, bytes);
kvm_mmu_track_write(vcpu, gpa, new, bytes);
}
#endif /* __KVM_X86_PAGE_TRACK_H */