Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM: s390: introduce kvm_s390_fpu_(store|load)
It's a bit nicer than having multiple lines and will help if there's another re-work since we'll only have to change one location.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 778666df60
commit 4a59932874
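Every call site below follows the same pattern; condensed from the hunks that follow, the store path changes like this (the load path is analogous, with fpu_lfpc_safe()/load_vx_regs()/load_fp_regs() replaced by kvm_s390_fpu_load()):

	/* before: open-coded lazy FPU/vector save, repeated at each call site */
	fpu_stfpc(&vcpu->run->s.regs.fpc);
	if (cpu_has_vx())
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	else
		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);

	/* after: a single helper call, defined once in kvm-s390.h */
	kvm_s390_fpu_store(vcpu->run);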
@@ -584,11 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	mci.val = mchk->mcic;
 	/* take care of lazy register loading */
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	save_access_regs(vcpu->run->s.regs.acrs);
 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
 		save_gs_cb(current->thread.gs_cb);
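For readers outside s390: the cpu_has_vx() branch exists because, with the vector facility installed, floating-point registers 0-15 are the leftmost 64 bits of vector registers 0-15, so saving the full vector register file also captures the FPRs; without the facility only the 64-bit FPRs exist. A minimal sketch of that overlay, written for this note only (not a kernel type; s390 is big-endian, so the FPR occupies the first 8 bytes of a stored vector register image):

#include <stdint.h>

/* Illustrative only: how a stored s390 vector register image overlays an FPR. */
union vreg_sketch {
	unsigned char v[16];		/* full 128-bit vector register image */
	struct {
		uint64_t fpr;		/* leftmost 64 bits == FPR n (n < 16) */
		uint64_t low;		/* remaining 64 bits of the VR */
	} halves;
};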
@@ -4949,11 +4949,7 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	}
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
-	fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_load(vcpu->run);
 	/* Sync fmt2 only data */
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
 		sync_regs_fmt2(vcpu);
@@ -5014,11 +5010,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
 		store_regs_fmt2(vcpu);
 }
@@ -5167,11 +5159,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	save_access_regs(vcpu->run->s.regs.acrs);
 
 	return kvm_s390_store_status_unloaded(vcpu, addr);
@@ -20,6 +20,24 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
+static inline void kvm_s390_fpu_store(struct kvm_run *run)
+{
+	fpu_stfpc(&run->s.regs.fpc);
+	if (cpu_has_vx())
+		save_vx_regs((__vector128 *)&run->s.regs.vrs);
+	else
+		save_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
+static inline void kvm_s390_fpu_load(struct kvm_run *run)
+{
+	fpu_lfpc_safe(&run->s.regs.fpc);
+	if (cpu_has_vx())
+		load_vx_regs((__vector128 *)&run->s.regs.vrs);
+	else
+		load_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1		1
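Usage note (hedged; the surrounding flow lives in kvm-s390.c and is not part of this diff): sync_regs() runs before entering the guest and now calls kvm_s390_fpu_load() to move the FPC and vector/FP registers from kvm_run into the hardware registers, while store_regs() runs after leaving the guest and calls kvm_s390_fpu_store() to write them back. Schematically, with a struct kvm_vcpu *vcpu as at the call sites above:

	/* entry path (sync_regs): kvm_run -> hardware registers */
	kvm_s390_fpu_load(vcpu->run);

	/* ... the guest runs and may change the FPC and vector/FP registers ... */

	/* exit path (store_regs): hardware registers -> kvm_run */
	kvm_s390_fpu_store(vcpu->run);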