mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM: remove KVM_REQ_UNHALT
KVM_REQ_UNHALT is now unnecessary because it is replaced by the return
value of kvm_vcpu_block/kvm_vcpu_halt.  Remove it.

No functional change intended.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Message-Id: <20220921003201.1441511-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 599275c060
commit c59fb12758
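Every arch hunk below removes the same boilerplate: a kvm_clear_request(KVM_REQ_UNHALT, vcpu) call issued right after kvm_vcpu_halt()/kvm_vcpu_block(), paired with the kvm_make_request() that common code performed in kvm_vcpu_check_block(). A minimal before/after sketch of that caller pattern (the emulate_halt_* function names are made up for illustration, not taken from the kernel):

	/* Before: common code set KVM_REQ_UNHALT when the vCPU became runnable
	 * inside kvm_vcpu_block(), so every halt-emulation path had to clear
	 * the bit after waking up. */
	static void emulate_halt_old(struct kvm_vcpu *vcpu)
	{
		kvm_vcpu_halt(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	/* After: the request bit is gone; per the commit message, the same
	 * information now travels via the return value of
	 * kvm_vcpu_block()/kvm_vcpu_halt() inside common KVM code. */
	static void emulate_halt_new(struct kvm_vcpu *vcpu)
	{
		kvm_vcpu_halt(vcpu);
	}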
@@ -97,7 +97,7 @@ VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap.
 This means general bitops, like those documented in [atomic-ops]_ could
 also be used, e.g. ::
 
-  clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests);
+  clear_bit(KVM_REQ_UNBLOCK & KVM_REQUEST_MASK, &vcpu->requests);
 
 However, VCPU request users should refrain from doing so, as it would
 break the abstraction. The first 8 bits are reserved for architecture
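The documentation hunk above is why the rest of this commit only touches helper calls: request users go through wrappers rather than raw bitops. A rough sketch of what such wrappers look like, with made-up names and simplified bodies (the real helpers live in include/linux/kvm_host.h and add memory-ordering details):

	/* Illustrative only: the request value encodes the bit index plus
	 * behaviour flags, so helpers mask with KVM_REQUEST_MASK before
	 * touching the vcpu->requests bitmap. */
	static inline void example_make_request(int req, struct kvm_vcpu *vcpu)
	{
		set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
	}

	static inline void example_clear_request(int req, struct kvm_vcpu *vcpu)
	{
		clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
	}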
@@ -126,17 +126,6 @@ KVM_REQ_UNBLOCK
   or in order to update the interrupt routing and ensure that assigned
   devices will wake up the vCPU.
 
-KVM_REQ_UNHALT
-
-  This request may be made from the KVM common function kvm_vcpu_block(),
-  which is used to emulate an instruction that causes a CPU to halt until
-  one of an architectural specific set of events and/or interrupts is
-  received (determined by checking kvm_arch_vcpu_runnable()).  When that
-  event or interrupt arrives kvm_vcpu_block() makes the request.  This is
-  in contrast to when kvm_vcpu_block() returns due to any other reason,
-  such as a pending signal, which does not indicate the VCPU's halt
-  emulation should stop, and therefore does not make the request.
-
 KVM_REQ_OUTSIDE_GUEST_MODE
 
   This "request" ensures the target vCPU has exited guest mode prior to the
@@ -297,21 +286,6 @@ architecture dependent. kvm_vcpu_block() calls kvm_arch_vcpu_runnable()
 to check if it should awaken. One reason to do so is to provide
 architectures a function where requests may be checked if necessary.
 
-Clearing Requests
------------------
-
-Generally it only makes sense for the receiving VCPU thread to clear a
-request. However, in some circumstances, such as when the requesting
-thread and the receiving VCPU thread are executed serially, such as when
-they are the same thread, or when they are using some form of concurrency
-control to temporarily execute synchronously, then it's possible to know
-that the request may be cleared immediately, rather than waiting for the
-receiving VCPU thread to handle the request in VCPU RUN. The only current
-examples of this are kvm_vcpu_block() calls made by VCPUs to block
-themselves. A possible side-effect of that call is to make the
-KVM_REQ_UNHALT request, which may then be cleared immediately when the
-VCPU returns from the call.
-
 References
 ==========
 
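With that section gone, the model the documentation keeps is the simple one: the receiving vCPU thread clears a request as part of handling it. A hedged sketch of that consumer-side pattern, using the test-and-clear style helper (KVM_REQ_FOO and handle_foo() are placeholders, not real kernel symbols):

	/* On the vCPU thread, before re-entering the guest: atomically check
	 * and clear the pending bit, then service the request. */
	static void example_service_requests(struct kvm_vcpu *vcpu)
	{
		if (kvm_check_request(KVM_REQ_FOO, vcpu))
			handle_foo(vcpu);
	}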
@@ -666,7 +666,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 
 	kvm_vcpu_halt(vcpu);
 	vcpu_clear_flag(vcpu, IN_WFIT);
-	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	preempt_disable();
 	vgic_v4_load(vcpu);
@@ -958,7 +958,6 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		 * We are runnable, then definitely go off to user space to
 		 * check if any I/O interrupts are pending.
 		 */
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		if (kvm_arch_vcpu_runnable(vcpu))
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 	}
@@ -499,7 +499,6 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	if (msr & MSR_POW) {
 		if (!vcpu->arch.pending_exceptions) {
 			kvm_vcpu_halt(vcpu);
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 			vcpu->stat.generic.halt_wakeup++;
 
 			/* Unset POW bit after we woke up */
@@ -393,7 +393,6 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_CEDE:
 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		vcpu->stat.generic.halt_wakeup++;
 		return EMULATE_DONE;
 	case H_LOGICAL_CI_LOAD:
@@ -719,7 +719,6 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
@@ -239,7 +239,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	case EV_HCALL_TOKEN(EV_IDLE):
 		r = EV_SUCCESS;
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		break;
 	default:
 		r = EV_UNIMPLEMENTED;
@@ -191,7 +191,6 @@ void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
 		kvm_vcpu_srcu_read_unlock(vcpu);
 		kvm_vcpu_halt(vcpu);
 		kvm_vcpu_srcu_read_lock(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 	}
 }
 
@@ -4343,8 +4343,6 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
-	/* nothing to do, just clear the request */
-	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 	/* we left the vsie handler, nothing to do, just clear the request */
 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
 
@@ -10813,8 +10813,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
 		if (hv_timer)
 			kvm_lapic_switch_to_hv_timer(vcpu);
 
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
-
 		/*
 		 * If the vCPU is not runnable, a signal or another host event
 		 * of some kind is pending; service it without changing the
@@ -11034,7 +11032,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			r = 0;
 			goto out;
 		}
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		r = -EAGAIN;
 		if (signal_pending(current)) {
 			r = -EINTR;
@@ -1065,7 +1065,6 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
 			del_timer(&vcpu->arch.xen.poll_timer);
 
 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		}
 
 		vcpu->arch.xen.poll_evtchn = 0;
@@ -151,12 +151,11 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_NO_ACTION      BIT(10)
 /*
  * Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
  */
 #define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VM_DEAD           (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK           2
-#define KVM_REQ_UNHALT            3
 #define KVM_REQUEST_ARCH_BASE     8
 
 /*
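The header hunk above also shows the bit layout the request machinery relies on: bits 0-2 hold the common requests, bits 3-7 are now spare, architecture-specific requests start at KVM_REQUEST_ARCH_BASE, and behaviour flags such as KVM_REQUEST_WAIT are OR'd into the value. A simplified sketch of how an arch request would be numbered (EXAMPLE_ARCH_REQ and KVM_REQ_MY_ARCH_EVENT are invented names; the real header provides its own helpers for this):

	/* Illustrative only: arch request N occupies bit KVM_REQUEST_ARCH_BASE + N,
	 * optionally tagged with flags such as KVM_REQUEST_WAIT. */
	#define EXAMPLE_ARCH_REQ(nr, flags)	(((nr) + KVM_REQUEST_ARCH_BASE) | (flags))
	#define KVM_REQ_MY_ARCH_EVENT		EXAMPLE_ARCH_REQ(0, KVM_REQUEST_WAIT)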
@@ -3409,10 +3409,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 	int ret = -EINTR;
 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	if (kvm_arch_vcpu_runnable(vcpu)) {
-		kvm_make_request(KVM_REQ_UNHALT, vcpu);
+	if (kvm_arch_vcpu_runnable(vcpu))
 		goto out;
-	}
 	if (kvm_cpu_has_pending_timer(vcpu))
 		goto out;
 	if (signal_pending(current))
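Reassembling the last hunk, kvm_vcpu_check_block() no longer publishes anything through vcpu->requests; after this commit it reads roughly as follows (the lines past the hunk, including the ret = 0 / out: tail, are inferred from the visible ret/idx setup and are only a sketch):

	static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
	{
		int ret = -EINTR;
		int idx = srcu_read_lock(&vcpu->kvm->srcu);

		if (kvm_arch_vcpu_runnable(vcpu))
			goto out;
		if (kvm_cpu_has_pending_timer(vcpu))
			goto out;
		if (signal_pending(current))
			goto out;
		/* ... any remaining checks are outside this hunk ... */

		ret = 0;
	out:
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		return ret;
	}

Callers that used to look for KVM_REQ_UNHALT after blocking now, per the commit message, rely on the return value of kvm_vcpu_block()/kvm_vcpu_halt() instead.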