Diffstat (limited to 'target/i386')
-rw-r--r-- | target/i386/cpu.c                   |  3
-rw-r--r-- | target/i386/hvf/hvf.c               |  4
-rw-r--r-- | target/i386/hvf/x86hvf.c            | 21
-rw-r--r-- | target/i386/kvm/kvm.c               | 46
-rw-r--r-- | target/i386/machine.c               | 19
-rw-r--r-- | target/i386/nvmm/nvmm-all.c         | 24
-rw-r--r-- | target/i386/tcg/system/seg_helper.c |  2
-rw-r--r-- | target/i386/tcg/system/svm_helper.c |  4
-rw-r--r-- | target/i386/whpx/whpx-all.c         | 34
9 files changed, 85 insertions, 72 deletions
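The common thread in this patch is replacing open-coded tests of cpu->interrupt_request with the cpu_test_interrupt() helper. The helper itself is defined outside target/i386, so it does not appear in this diff; a minimal sketch of its presumable shape (the acquire ordering is an assumption, inferred from the lock-free call sites below):

    /* Sketch: test interrupt_request bits without taking the BQL.
     * An atomic load pairs with writers that publish the bits under
     * the lock, so lock-free readers such as kvm_arch_pre_run() see
     * a consistent value. */
    static inline bool cpu_test_interrupt(CPUState *cpu, int mask)
    {
        return qatomic_load_acquire(&cpu->interrupt_request) & mask;
    }

Note that the mask may combine several bits: cpu_test_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR) returns true if either bit is set.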
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 673f858..6d85149 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -8946,9 +8946,6 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
 
     /* PDCM is fixed1 bit for TDX */
     if (!cpu->enable_pmu && !is_tdx_vm()) {
-        mark_unavailable_features(cpu, FEAT_1_ECX,
-            env->user_features[FEAT_1_ECX] & CPUID_EXT_PDCM,
-            "This feature is not available due to PMU being disabled");
         env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
     }
 
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 818b504..8445cad 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -773,9 +773,9 @@ int hvf_vcpu_exec(CPUState *cpu)
         switch (exit_reason) {
         case EXIT_REASON_HLT: {
             macvm_set_rip(cpu, rip + ins_len);
-            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+            if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
                 (env->eflags & IF_MASK))
-                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+                && !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI) &&
                 !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                 cpu->halted = 1;
                 ret = EXCP_HLT;
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 17fce1d..9e05e0e 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -395,7 +395,7 @@ bool hvf_inject_interrupts(CPUState *cs)
         };
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
             cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
             info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -406,7 +406,7 @@ bool hvf_inject_interrupts(CPUState *cs)
     }
 
     if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
-        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+        cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
         int line = cpu_get_pic_interrupt(env);
         cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
@@ -415,11 +415,10 @@ bool hvf_inject_interrupts(CPUState *cs)
                 VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
         }
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
         vmx_set_int_window_exiting(cs);
     }
-    return (cs->interrupt_request
-            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR);
 }
 
 int hvf_process_events(CPUState *cs)
@@ -432,25 +431,25 @@ int hvf_process_events(CPUState *cs)
         env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT)) {
         cpu_synchronize_state(cs);
         do_cpu_init(cpu);
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
     }
-    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 0;
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
         cpu_synchronize_state(cs);
         do_cpu_sipi(cpu);
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
         cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
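The HVF halt and wake-up logic above follows the same pattern as every other accelerator in this patch: a halted vCPU resumes only when an interrupt can actually be delivered. Condensed into a hypothetical helper (can_leave_halt is not part of the patch, only an illustration built from the conditions in the hunks above):

    /* Illustration only: resume from HLT if a maskable interrupt is
     * pending and IF is set, or if an NMI is pending (NMIs ignore IF). */
    static bool can_leave_halt(CPUState *cs, CPUX86State *env)
    {
        return (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
                (env->eflags & IF_MASK)) ||
               cpu_test_interrupt(cs, CPU_INTERRUPT_NMI);
    }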
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 369626f..306430a 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -5453,8 +5453,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     int ret;
 
     /* Inject NMI */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
-        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
             bql_lock();
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             bql_unlock();
@@ -5465,7 +5465,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
                         strerror(-ret));
             }
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
             bql_lock();
             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
             bql_unlock();
@@ -5478,31 +5478,30 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         }
     }
 
-    if (!kvm_pic_in_kernel()) {
-        bql_lock();
-    }
-
     /* Force the VCPU out of its inner loop to process any INIT requests
      * or (for userspace APIC, but it is cheap to combine the checks here)
      * pending TPR access reports.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
-            cpu->exit_request = 1;
+            qatomic_set(&cpu->exit_request, 1);
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
-            cpu->exit_request = 1;
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
+            qatomic_set(&cpu->exit_request, 1);
         }
     }
 
     if (!kvm_pic_in_kernel()) {
         /* Try to inject an interrupt if the guest can accept it */
         if (run->ready_for_interrupt_injection &&
-            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+            cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK)) {
             int irq;
 
+            bql_lock();
+
             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
@@ -5517,13 +5516,14 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
                             strerror(-ret));
                 }
             }
+            bql_unlock();
         }
 
         /* If we have an interrupt but the guest is not ready to receive an
          * interrupt, request an interrupt window exit.  This will
          * cause a return to userspace as soon as the guest is ready to
          * receive interrupts. */
-        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
             run->request_interrupt_window = 1;
         } else {
             run->request_interrupt_window = 0;
@@ -5531,8 +5531,6 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 
         DPRINTF("setting tpr\n");
         run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
-
-        bql_unlock();
     }
 }
 
@@ -5595,7 +5593,7 @@ int kvm_arch_process_async_events(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_MCE)) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
@@ -5618,7 +5616,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         }
     }
 
-    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT) &&
         !(env->hflags & HF_SMM_MASK)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_init(cpu);
@@ -5628,20 +5626,20 @@ int kvm_arch_process_async_events(CPUState *cs)
         return 0;
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
     }
-    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 0;
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_sipi(cpu);
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
         kvm_cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -5656,9 +5654,9 @@ static int kvm_handle_halt(X86CPU *cpu)
     CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
 
-    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) &&
-        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 1;
         return EXCP_HLT;
     }
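Two locking changes ride along in kvm_arch_pre_run(): the BQL is no longer held across the whole userspace-PIC section, only around the read-modify-write that clears CPU_INTERRUPT_HARD, and exit_request is now set with qatomic_set() because the function runs outside the lock. The consumer is presumably a matching atomic load in the generic vCPU loop, along these lines (a sketch, not the actual loop code; kvm_vcpu_run_once is a hypothetical name):

    /* Sketch of the consumer side: the vCPU thread polls exit_request
     * with an atomic read, so an unlocked qatomic_set() from
     * kvm_arch_pre_run() is enough to kick it out of the inner loop. */
    while (!qatomic_read(&cpu->exit_request)) {
        kvm_vcpu_run_once(cpu);   /* hypothetical inner-loop body */
    }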
diff --git a/target/i386/machine.c b/target/i386/machine.c
index dd2dac1..45b7cea 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -462,6 +462,24 @@ static const VMStateDescription vmstate_exception_info = {
     }
 };
 
+static bool cpu_errcode_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+
+    return cpu->env.has_error_code != 0;
+}
+
+static const VMStateDescription vmstate_error_code = {
+    .name = "cpu/error_code",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = cpu_errcode_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_INT32(env.error_code, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 /* Poll control MSR enabled by default */
 static bool poll_control_msr_needed(void *opaque)
 {
@@ -1746,6 +1764,7 @@ const VMStateDescription vmstate_x86_cpu = {
     },
     .subsections = (const VMStateDescription * const []) {
         &vmstate_exception_info,
+        &vmstate_error_code,
         &vmstate_async_pf_msr,
         &vmstate_async_pf_int_msr,
         &vmstate_pv_eoi_msr,
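The machine.c hunk adds a guarded migration subsection. A subsection with a .needed callback is only written to the migration stream when the callback returns true on the source, so an older destination QEMU that does not know "cpu/error_code" can still accept migrations from guests where has_error_code is zero. The same pattern works for any optional field; a generic sketch with a hypothetical field named foo:

    /* Hypothetical example of the guarded-subsection pattern used above. */
    static bool foo_needed(void *opaque)
    {
        X86CPU *cpu = opaque;
        return cpu->env.foo != 0;          /* skip the subsection when 0 */
    }

    static const VMStateDescription vmstate_foo = {
        .name = "cpu/foo",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = foo_needed,              /* evaluated at save time */
        .fields = (const VMStateField[]) {
            VMSTATE_INT32(env.foo, X86CPU),
            VMSTATE_END_OF_LIST()
        }
    };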
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 92e3b8b..c1ac74c 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -413,11 +413,11 @@ nvmm_vcpu_pre_run(CPUState *cpu)
      * Force the VCPU out of its inner loop to process any INIT requests
      * or commit pending TPR access.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
         cpu->exit_request = 1;
     }
 
-    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+    if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         if (nvmm_can_take_nmi(cpu)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             event->type = NVMM_VCPU_EVENT_INTR;
@@ -426,7 +426,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
         }
     }
 
-    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         if (nvmm_can_take_int(cpu)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             event->type = NVMM_VCPU_EVENT_INTR;
@@ -436,7 +436,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
     }
 
     /* Don't want SMIs. */
-    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
     }
 
@@ -651,9 +651,9 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
 
     bql_lock();
 
-    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (cpu_env(cpu)->eflags & IF_MASK)) &&
-        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;
         ret = 1;
@@ -691,25 +691,25 @@ nvmm_vcpu_loop(CPUState *cpu)
      * Some asynchronous events must be handled outside of the inner
      * VCPU loop. They are handled here.
      */
-    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT)) {
         nvmm_cpu_synchronize_state(cpu);
         do_cpu_init(x86_cpu);
         /* set int/nmi windows back to the reset state */
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(x86_cpu->apic_state);
     }
-    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->halted = false;
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
         nvmm_cpu_synchronize_state(cpu);
         do_cpu_sipi(x86_cpu);
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
         nvmm_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
index d4ea890..794a23d 100644
--- a/target/i386/tcg/system/seg_helper.c
+++ b/target/i386/tcg/system/seg_helper.c
@@ -133,7 +133,7 @@ bool x86_cpu_exec_halt(CPUState *cpu)
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         bql_lock();
         apic_poll_irq(x86_cpu->apic_state);
         cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
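x86_cpu_exec_halt() clears the POLL bit through cpu_reset_interrupt() rather than with a plain &=. That helper is defined in common code, not in this diff; its usual shape is to perform the clear under the BQL so it serializes with other writers (a sketch, with the lock-elision detail assumed):

    /* Sketch: clear interrupt_request bits while holding the BQL,
     * taking the lock only if the caller does not already hold it. */
    void cpu_reset_interrupt(CPUState *cpu, int mask)
    {
        bool need_lock = !bql_locked();

        if (need_lock) {
            bql_lock();
        }
        cpu->interrupt_request &= ~mask;
        if (need_lock) {
            bql_unlock();
        }
    }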
diff --git a/target/i386/tcg/system/svm_helper.c b/target/i386/tcg/system/svm_helper.c
index b27049b..3569196 100644
--- a/target/i386/tcg/system/svm_helper.c
+++ b/target/i386/tcg/system/svm_helper.c
@@ -49,7 +49,7 @@ static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
 static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
 {
     uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
-    *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
+    *seg_base = (((int64_t) *seg_base) << shift_amt) >> shift_amt;
 }
 
 static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
@@ -403,7 +403,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     env->hflags2 |= HF2_GIF_MASK;
 
     if (ctl_has_irq(env)) {
-        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+        cpu_set_interrupt(cs, CPU_INTERRUPT_VIRQ);
     }
 
     if (virtual_gif_set(env)) {
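The svm_canonicalization() change is a correctness fix, not just tidying: long is only 32 bits wide on some ABIs (Windows LLP64, 32-bit hosts), so the old cast truncated the 64-bit segment base before the shift pair. With int64_t the shifts sign-extend from the top implemented address bit on every host. A worked example, assuming a 48-bit virtual address width so shift_amt = 16:

    #include <stdint.h>

    uint64_t seg_base  = 0x0000ffff12345678ULL;    /* bit 47 is set */
    uint16_t shift_amt = 64 - 48;                  /* = 16 */

    /* The left shift moves bit 47 into the sign bit; the arithmetic
     * right shift then copies it back down through bits 63..48. */
    uint64_t canon = (uint64_t)((((int64_t)seg_base) << shift_amt) >> shift_amt);
    /* canon == 0xffffffff12345678ULL */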
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index b72dcff..878cdd1 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -1436,9 +1436,9 @@ static int whpx_handle_halt(CPUState *cpu)
     int ret = 0;
 
     bql_lock();
-    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (cpu_env(cpu)->eflags & IF_MASK)) &&
-        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;
         ret = 1;
@@ -1469,15 +1469,15 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
 
     /* Inject NMI */
     if (!vcpu->interruption_pending &&
-        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
-        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             vcpu->interruptable = false;
             new_int.InterruptionType = WHvX64PendingNmi;
             new_int.InterruptionPending = 1;
             new_int.InterruptionVector = 2;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
         }
     }
@@ -1486,12 +1486,12 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
      * Force the VCPU out of its inner loop to process any INIT requests or
      * commit pending TPR access.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
             cpu->exit_request = 1;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
             cpu->exit_request = 1;
         }
     }
@@ -1501,7 +1501,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (!vcpu->interruption_pending &&
         vcpu->interruptable && (env->eflags & IF_MASK)) {
         assert(!new_int.InterruptionPending);
-        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
@@ -1519,7 +1519,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
             reg_count += 1;
         }
     } else if (vcpu->ready_for_pic_interrupt &&
-               (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+               cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
@@ -1546,7 +1546,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
 
     /* Update the state of the interrupt delivery notification */
     if (!vcpu->window_registered &&
-        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         reg_values[reg_count].DeliverabilityNotifications =
             (WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) {
                 .InterruptNotification = 1
@@ -1599,30 +1599,30 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
     AccelCPUState *vcpu = cpu->accel;
 
-    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
         !(env->hflags & HF_SMM_MASK)) {
         whpx_cpu_synchronize_state(cpu);
         do_cpu_init(x86_cpu);
         vcpu->interruptable = true;
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(x86_cpu->apic_state);
     }
 
-    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->halted = false;
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
         whpx_cpu_synchronize_state(cpu);
         do_cpu_sipi(x86_cpu);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
         whpx_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
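Finally, note that helper_vmrun() in the svm_helper.c hunk raises CPU_INTERRUPT_VIRQ through cpu_set_interrupt() instead of OR-ing the flag in directly, mirroring cpu_test_interrupt() on the write side. Its definition is also outside this diff; one plausible shape, assuming setters still serialize on the BQL and publish with a release store so the lock-free readers above observe the update:

    /* Sketch of the write-side counterpart to cpu_test_interrupt();
     * the locking discipline shown here is an assumption. */
    static inline void cpu_set_interrupt(CPUState *cpu, int mask)
    {
        assert(bql_locked());
        qatomic_store_release(&cpu->interrupt_request,
                              cpu->interrupt_request | mask);
    }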