Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
KVM: use kvm_memslots whenever possible
kvm_memslots provides lockdep checking.  Use it consistently instead of
explicit dereferencing of kvm->memslots.

Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a47d2b07ea
commit 9f6b802978
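For context on the lockdep claim in the commit message: the kvm_memslots() helper wraps the bare kvm->memslots dereference in an RCU/SRCU-checked accessor. A minimal sketch of what such an accessor looks like is shown below; it is paraphrased from memory of include/linux/kvm_host.h in this era, and the exact conditions checked upstream may differ.

/*
 * Illustrative sketch of a lockdep/SRCU-checked memslots accessor,
 * modeled on kvm_memslots() in include/linux/kvm_host.h.  The precise
 * check in the kernel may differ; this is not a verbatim copy.
 */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu) ||
			lockdep_is_held(&kvm->slots_lock));
}

Callers that read kvm->memslots directly bypass that checking entirely, which is why every explicit dereference in the hunks below is converted to go through the helper.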
@@ -1155,7 +1155,8 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
  */
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 {
-	struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
 	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
@@ -968,6 +968,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 /* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	unsigned long ga, ga_end;
 	int is_dirty = 0;
@@ -982,7 +983,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		memslot = id_to_memslot(kvm->memslots, log->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
 
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -650,7 +650,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	slots = kvm->memslots;
+	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots) {
 		/*
 		 * This assumes it is acceptable to lose reference and
@@ -2321,6 +2321,7 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 					 struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r;
 	unsigned long n;
@@ -2331,7 +2332,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -2384,6 +2386,7 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 				const struct kvm_memory_slot *old)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
 	if (npages && old->npages) {
@@ -2393,7 +2396,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 		 * since the rmap array starts out as all zeroes,
 		 * i.e. no pages are dirty.
 		 */
-		memslot = id_to_memslot(kvm->memslots, mem->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, mem->slot);
 		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
 	}
 }
@@ -1530,6 +1530,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 					 struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
@@ -1545,7 +1546,8 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		memslot = id_to_memslot(kvm->memslots, log->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
 
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -236,6 +236,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	unsigned long n;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -245,7 +246,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -7782,6 +7782,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *old,
 				enum kvm_mr_change change)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *new;
 	int nr_mmu_pages = 0;
 
@@ -7803,7 +7804,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 
 	/* It's OK to get 'new' slot here as it has already been installed */
-	new = id_to_memslot(kvm->memslots, mem->slot);
+	slots = kvm_memslots(kvm);
+	new = id_to_memslot(slots, mem->slot);
 
 	/*
 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
@@ -734,7 +734,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 		struct kvm_memslots *slots)
 {
-	struct kvm_memslots *old_memslots = kvm->memslots;
+	struct kvm_memslots *old_memslots = kvm_memslots(kvm);
 
 	/*
 	 * Set the low bit in the generation, which disables SPTE caching
@@ -799,7 +799,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	slot = id_to_memslot(kvm->memslots, mem->slot);
+	slot = id_to_memslot(kvm_memslots(kvm), mem->slot);
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -842,7 +842,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		/* Check for overlaps */
 		r = -EEXIST;
-		kvm_for_each_memslot(slot, kvm->memslots) {
+		kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
 			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
 			    (slot->id == mem->slot))
 				continue;
@@ -873,7 +873,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
 	if (!slots)
 		goto out_free;
-	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+	memcpy(slots, kvm_memslots(kvm), sizeof(struct kvm_memslots));
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 		slot = id_to_memslot(slots, mem->slot);
@@ -966,6 +966,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 int kvm_get_dirty_log(struct kvm *kvm,
 			struct kvm_dirty_log *log, int *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	unsigned long n;
@@ -975,7 +976,8 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -1024,6 +1026,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 int kvm_get_dirty_log_protect(struct kvm *kvm,
 			struct kvm_dirty_log *log, bool *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	unsigned long n;
@@ -1034,7 +1037,8 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 
 	dirty_bitmap = memslot->dirty_bitmap;
 	r = -ENOENT;