mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-08 14:23:19 +00:00
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/pti updates from Thomas Gleixner:
 "Another set of melted spectrum updates:

  - Iron out the last late microcode loading issues by actually checking
    whether new microcode is present and preventing the CPU synchronization
    from running into a timeout-induced hang.

  - Remove Skylake C2 from the microcode blacklist according to the latest
    Intel documentation.

  - Fix the VM86 POPF emulation, which traps if VIP is set but VIF is not.
    Enhance the selftests to catch that kind of issue.

  - Annotate indirect calls/jumps for objtool on 32-bit. This is not a
    functional issue, but for consistency's sake it's the right thing to do.

  - Fix a jump label build warning observed on SPARC64, which uses 32-bit
    storage for the code location that is cast to a 64-bit pointer without
    extending it to 64 bits first.

  - Add two new cpufeature bits. Not really an urgent issue, but it provides
    them for both x86 and x86/kvm work. No impact on the current kernel."

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode: Fix CPU synchronization routine
  x86/microcode: Attempt late loading only when new microcode is present
  x86/speculation: Remove Skylake C2 from Speculation Control microcode blacklist
  jump_label: Fix sparc64 warning
  x86/speculation, objtool: Annotate indirect calls/jumps for objtool on 32-bit kernels
  x86/vm86/32: Fix POPF emulation
  selftests/x86/entry_from_vm86: Add test cases for POPF
  selftests/x86/entry_from_vm86: Exit with 1 if we fail
  x86/cpufeatures: Add Intel PCONFIG cpufeature
  x86/cpufeatures: Add Intel Total Memory Encryption cpufeature
commit 9e1909b9da
@@ -316,6 +316,7 @@
 #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
 #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
 #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
 #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
@@ -39,6 +39,7 @@ struct device;

 enum ucode_state {
         UCODE_OK = 0,
+        UCODE_NEW,
         UCODE_UPDATED,
         UCODE_NFOUND,
         UCODE_ERROR,
@@ -183,7 +183,10 @@
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
+# define CALL_NOSPEC \
+        ALTERNATIVE( \
+        ANNOTATE_RETPOLINE_SAFE \
+        "call *%[thunk_target]\n", \
         " jmp 904f;\n" \
         " .align 16\n" \
         "901: call 903f;\n" \
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 /*
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
  * - https://kb.vmware.com/s/article/52345
  * - Microcode revisions observed in the wild
  * - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
         { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
         { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
         { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
-        { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
         { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
         { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
         { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
                 return -EINVAL;

         ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-        if (ret != UCODE_OK)
+        if (ret > UCODE_UPDATED)
                 return -EINVAL;

         return 0;
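The ret > UCODE_UPDATED check above (and the later err > UCODE_NFOUND test in the core driver) relies on the declaration order of enum ucode_state from the microcode.h hunk: UCODE_OK, UCODE_NEW and UCODE_UPDATED are success states, UCODE_NFOUND and UCODE_ERROR come after them. A minimal stand-alone sketch of that convention (the is_failure() helper is made up for illustration, not kernel code):

#include <assert.h>
#include <stdio.h>

/* Same declaration order as the enum ucode_state hunk above. */
enum ucode_state { UCODE_OK = 0, UCODE_NEW, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

/* Hypothetical helper: anything past UCODE_UPDATED is treated as failure. */
static int is_failure(enum ucode_state ret)
{
        return ret > UCODE_UPDATED;
}

int main(void)
{
        assert(!is_failure(UCODE_OK));
        assert(!is_failure(UCODE_NEW));
        assert(!is_failure(UCODE_UPDATED));
        assert(is_failure(UCODE_NFOUND));
        assert(is_failure(UCODE_ERROR));
        puts("ordering convention holds");
        return 0;
}

Comparing against the enum order means a newly added success state such as UCODE_NEW does not have to be special-cased at every call site.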
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
+        struct ucode_patch *p;
         enum ucode_state ret;

         /* free old equiv table */
         free_equiv_cpu_table();

         ret = __load_microcode_amd(family, data, size);
-        if (ret != UCODE_OK)
+        if (ret != UCODE_OK) {
                 cleanup();
+                return ret;
+        }

-#ifdef CONFIG_X86_32
-        /* save BSP's matching patch for early load */
-        if (save) {
-                struct ucode_patch *p = find_patch(0);
-                if (p) {
-                        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-                        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-                                                               PATCH_MAX_SIZE));
-                }
+        p = find_patch(0);
+        if (!p) {
+                return ret;
+        } else {
+                if (boot_cpu_data.microcode == p->patch_id)
+                        return ret;
+
+                ret = UCODE_NEW;
         }
-#endif
+
+        /* save BSP's matching patch for early load */
+        if (!save)
+                return ret;
+
+        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
         return ret;
 }

@@ -517,7 +517,29 @@ static int check_online_cpus(void)
         return -EINVAL;
 }

-static atomic_t late_cpus;
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+        int all_cpus = num_online_cpus();
+
+        atomic_inc(t);
+
+        while (atomic_read(t) < all_cpus) {
+                if (timeout < SPINUNIT) {
+                        pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+                                all_cpus - atomic_read(t));
+                        return 1;
+                }
+
+                ndelay(SPINUNIT);
+                timeout -= SPINUNIT;
+
+                touch_nmi_watchdog();
+        }
+        return 0;
+}

 /*
  * Returns:
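__wait_for_cpus() above is a plain counting rendezvous: every CPU increments the shared counter, then spins until the count reaches num_online_cpus(), giving up once the timeout budget is exhausted. A minimal user-space analogue of the same idea (pthreads plus C11 atomics; the thread count, names and the missing timeout handling are simplifications, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int late_in;          /* counterpart of late_cpus_in */

static void wait_for_all(atomic_int *t)
{
        atomic_fetch_add(t, 1);
        while (atomic_load(t) < NTHREADS)
                ;                   /* spin; the kernel adds ndelay() and a timeout */
}

static void *worker(void *arg)
{
        long id = (long)arg;

        wait_for_all(&late_in);     /* nobody passes until everyone has arrived */
        printf("thread %ld past the rendezvous\n", id);
        return NULL;
}

int main(void)
{
        pthread_t tid[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}

The kernel variant keeps two counters, late_cpus_in and late_cpus_out, so the CPUs rendezvous once before the update is attempted and once after it, keeping the whole machine quiesced until the last CPU has finished.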
@@ -527,30 +549,16 @@ static atomic_t late_cpus;
  */
 static int __reload_late(void *info)
 {
-        unsigned int timeout = NSEC_PER_SEC;
-        int all_cpus = num_online_cpus();
         int cpu = smp_processor_id();
         enum ucode_state err;
         int ret = 0;

-        atomic_dec(&late_cpus);
-
         /*
          * Wait for all CPUs to arrive. A load will not be attempted unless all
          * CPUs show up.
          * */
-        while (atomic_read(&late_cpus)) {
-                if (timeout < SPINUNIT) {
-                        pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
-                                atomic_read(&late_cpus));
-                        return -1;
-                }
-
-                ndelay(SPINUNIT);
-                timeout -= SPINUNIT;
-
-                touch_nmi_watchdog();
-        }
+        if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+                return -1;

         spin_lock(&update_lock);
         apply_microcode_local(&err);
@@ -558,15 +566,22 @@ static int __reload_late(void *info)

         if (err > UCODE_NFOUND) {
                 pr_warn("Error reloading microcode on CPU %d\n", cpu);
-                ret = -1;
-        } else if (err == UCODE_UPDATED) {
+                return -1;
+        /* siblings return UCODE_OK because their engine got updated already */
+        } else if (err == UCODE_UPDATED || err == UCODE_OK) {
                 ret = 1;
+        } else {
+                return ret;
         }

-        atomic_inc(&late_cpus);
-
-        while (atomic_read(&late_cpus) != all_cpus)
-                cpu_relax();
+        /*
+         * Increase the wait timeout to a safe value here since we're
+         * serializing the microcode update and that could take a while on a
+         * large number of CPUs. And that is fine as the *actual* timeout will
+         * be determined by the last CPU finished updating and thus cut short.
+         */
+        if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+                panic("Timeout during microcode update!\n");

         return ret;
 }
@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
 {
         int ret;

-        atomic_set(&late_cpus, num_online_cpus());
+        atomic_set(&late_cpus_in, 0);
+        atomic_set(&late_cpus_out, 0);

         ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
-        if (ret < 0)
-                return ret;
-        else if (ret > 0)
+        if (ret > 0)
                 microcode_check();

         return ret;
@@ -607,7 +621,7 @@ static ssize_t reload_store(struct device *dev,
                 return size;

         tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
-        if (tmp_ret != UCODE_OK)
+        if (tmp_ret != UCODE_NEW)
                 return size;

         get_online_cpus();
@@ -691,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
         if (system_state != SYSTEM_RUNNING)
                 return UCODE_NFOUND;

-        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-                                                     refresh_fw);
-
-        if (ustate == UCODE_OK) {
+        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+        if (ustate == UCODE_NEW) {
                 pr_debug("CPU%d updated upon init\n", cpu);
                 apply_microcode_on_target(cpu);
         }
@@ -862,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
         unsigned int leftover = size;
         unsigned int curr_mc_size = 0, new_mc_size = 0;
         unsigned int csig, cpf;
+        enum ucode_state ret = UCODE_OK;

         while (leftover) {
                 struct microcode_header_intel mc_header;
@@ -903,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                         new_mc = mc;
                         new_mc_size = mc_size;
                         mc = NULL; /* trigger new vmalloc */
+                        ret = UCODE_NEW;
                 }

                 ucode_ptr += mc_size;
@@ -932,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
         pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
                  cpu, new_rev, uci->cpu_sig.rev);

-        return UCODE_OK;
+        return ret;
 }

 static int get_ucode_fw(void *to, const void *from, size_t n)
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
                 return;

 check_vip:
-        if (VEFLAGS & X86_EFLAGS_VIP) {
+        if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+            (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
                 save_v86_state(regs, VM86_STI);
                 return;
         }
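The old condition fired on X86_EFLAGS_VIP alone, so a guest POPF that left VIF clear was still bounced out of VM86 mode with VM86_STI. The fixed check only returns to user space when both the pending-interrupt bit (VIP) and the virtual interrupt-enable bit (VIF) are set. A stand-alone sketch of just that flag test (the mask values are the real eflags bits, the helper name is made up):

#include <assert.h>
#include <stdbool.h>

#define X86_EFLAGS_VIF 0x00080000  /* virtual interrupt flag (bit 19) */
#define X86_EFLAGS_VIP 0x00100000  /* virtual interrupt pending (bit 20) */

/* Hypothetical helper mirroring the fixed check_vip: test: only VIP plus VIF traps. */
static bool should_deliver_pending_irq(unsigned long veflags)
{
        return (veflags & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
               (X86_EFLAGS_VIP | X86_EFLAGS_VIF);
}

int main(void)
{
        assert(!should_deliver_pending_irq(X86_EFLAGS_VIP));   /* the old code trapped here */
        assert(!should_deliver_pending_irq(X86_EFLAGS_VIF));
        assert(should_deliver_pending_irq(X86_EFLAGS_VIP | X86_EFLAGS_VIF));
        return 0;
}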
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
                         if (kernel_text_address(entry->code))
                                 arch_jump_label_transform(entry, jump_label_type(entry));
                         else
-                                WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+                                WARN_ONCE(1, "can't patch jump_label at %pS",
+                                          (void *)(unsigned long)entry->code);
                 }
         }
 }
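On sparc64 the jump entry stores the code location in a 32-bit field while pointers are 64-bit, so casting the field straight to a pointer trips gcc's int-to-pointer-cast warning; widening through unsigned long first, as the hunk does, keeps the value and silences the warning. A minimal sketch of the pattern (the struct here is a stand-in, not the real jump_entry layout):

#include <stdint.h>
#include <stdio.h>

struct entry {
        uint32_t code;   /* 32-bit storage for a code address, as on sparc64 */
};

int main(void)
{
        struct entry e = { .code = 0x1000 };

        /* (void *)e.code would warn: cast to pointer from integer of different size */
        void *addr = (void *)(unsigned long)e.code;   /* widen first, then cast */

        printf("%p\n", addr);
        return 0;
}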
@@ -95,6 +95,10 @@ asm (
         "int3\n\t"
         "vmcode_int80:\n\t"
         "int $0x80\n\t"
+        "vmcode_popf_hlt:\n\t"
+        "push %ax\n\t"
+        "popf\n\t"
+        "hlt\n\t"
         "vmcode_umip:\n\t"
         /* addressing via displacements */
         "smsw (2052)\n\t"
@@ -124,8 +128,8 @@ asm (

 extern unsigned char vmcode[], end_vmcode[];
 extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
-        vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[],
-        vmcode_umip_str[], vmcode_umip_sldt[];
+        vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
+        vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];

 /* Returns false if the test was skipped. */
 static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
             (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
                 printf("[OK]\tReturned correctly\n");
         } else {
-                printf("[FAIL]\tIncorrect return reason\n");
+                printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
                 nerrs++;
         }
@@ -264,6 +268,9 @@ int main(void)
         v86.regs.ds = load_addr / 16;
         v86.regs.es = load_addr / 16;

+        /* Use the end of the page as our stack. */
+        v86.regs.esp = 4096;
+
         assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */

         /* #BR -- should deliver SIG??? */
@@ -295,6 +302,23 @@ int main(void)
         v86.regs.eflags &= ~X86_EFLAGS_IF;
         do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");

+        /* POPF with VIP set but IF clear: should not trap */
+        v86.regs.eflags = X86_EFLAGS_VIP;
+        v86.regs.eax = 0;
+        do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
+
+        /* POPF with VIP set and IF set: should trap */
+        v86.regs.eflags = X86_EFLAGS_VIP;
+        v86.regs.eax = X86_EFLAGS_IF;
+        do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
+
+        /* POPF with VIP clear and IF set: should not trap */
+        v86.regs.eflags = 0;
+        v86.regs.eax = X86_EFLAGS_IF;
+        do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
+
+        v86.regs.eflags = 0;
+
         /* INT3 -- should cause #BP */
         do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
@@ -318,7 +342,7 @@ int main(void)
         clearhandler(SIGSEGV);

         /* Make sure nothing explodes if we fork. */
-        if (fork() > 0)
+        if (fork() == 0)
                 return 0;

         return (nerrs == 0 ? 0 : 1);
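The last hunk flips the fork check so the child, not the parent, exits early: with the old fork() > 0 test the parent always returned 0 and only the discarded child ever looked at nerrs, so a failing run still reported success. A small sketch of the difference (the nerrs value is illustrative):

#include <sys/wait.h>
#include <unistd.h>

static int nerrs = 1;            /* pretend one test case failed */

int main(void)
{
        /* Old behaviour: the parent bails out, failures never reach the exit status. */
        /* if (fork() > 0) return 0; */

        /* Fixed behaviour: the child bails out, the parent reports nerrs. */
        if (fork() == 0)
                return 0;

        wait(NULL);              /* reap the child so it does not linger */
        return (nerrs == 0 ? 0 : 1);
}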