author     Richard Henderson <richard.henderson@linaro.org>   2025-08-31 09:08:09 +1000
committer  Richard Henderson <richard.henderson@linaro.org>   2025-08-31 09:08:09 +1000
commit     91589bcd9fee0e66b241d04e5f37cd4f218187a2 (patch)
tree       fb58d2189037c8368c606650ff142f6697c51f3c /target/i386
parent     e101d33792530093fa0b0a6e5f43e4d8cfe4581e (diff)
parent     83bd8e65bc70cef03a207df315004f8b1301dc53 (diff)
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
* hw/i386: split isapc from PCI boards
* cpu-exec, accel: remove BQL usage for interrupt_request != 0
* memory, hpet, pmtimer: introduce BQL-free PIO/MMIO

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCgAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmixiO4UHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroMTowf9EmIcSgFXrP8QR/rVQ+Z8+csR4md7
# QDzQwoDHaP9F/J728AoT/nDwwlfiHRbcH8AQbzzMrsmMnqhaWCFWD5snGelzPJAo
# BPaOa4eYvwgssW1apfxGgzae71B3Hbx/sMYHdRcUvBnvS6cKEcOcgK8pANuZGzGQ
# uRquCMvk14WhnQV/NFqr2PmtmxXjdDNefdi1RfpaPDEt4VZsh4B3afU+I+L4LvIQ
# NOPh0PbDk+BLRt2fRPgdwF6KqS5ajPEzKnBlS0uxSXKxpLOLM/2SNDOGDDVUrAwV
# ILrnchZrpxHsHwBCjaBhKZDTTQUcH0HUrZhRJbUPsg5feHRs3KoaFJjmCQ==
# =RMLB
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 29 Aug 2025 09:03:10 PM AEST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [unknown]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [unknown]
# gpg: WARNING: The key's User ID is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (28 commits)
  tcg: move interrupt caching and single step masking closer to user
  kvm: i386: irqchip: take BQL only if there is an interrupt
  hpet: make main counter read lock-less
  hpet: move out main counter read into a separate block
  hpet: switch to fine-grained device locking
  acpi: mark PMTIMER as unlocked
  memory: reintroduce BQL-free fine-grained PIO/MMIO
  add cpu_test_interrupt()/cpu_set_interrupt() helpers and use them tree wide
  user-exec: ensure interrupt_request is not used
  hw/i386/isapc.c: replace rom_memory with system_memory
  hw/i386/pc_piix.c: replace rom_memory with pci_memory
  hw/i386/pc_piix.c: remove unused headers after isapc machine split
  hw/i386: move isapc machine to separate isapc.c file
  hw/i386/pc_piix.c: assume pcmc->pci_enabled is always true in pc_init1()
  hw/i386/pc_piix.c: always initialise ISA IDE drives in pc_init_isa()
  hw/i386/pc_piix.c: remove pc_system_flash_cleanup_unused() from pc_init_isa()
  hw/i386/pc_piix.c: hardcode hole64_size to 0 in pc_init_isa()
  hw/i386/pc_piix.c: simplify RAM size logic in pc_init_isa()
  hw/i386/pc_piix.c: remove nvdimm initialisation from pc_init_isa()
  hw/i386/pc_piix.c: remove SGX initialisation from pc_init_isa()
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
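Within target/i386, the bulk of this merge is the tree-wide switch from open-coded tests on cpu->interrupt_request to the new cpu_test_interrupt()/cpu_set_interrupt() helpers, so accelerators can check for pending interrupts without holding the BQL. The helpers' real definitions are not part of this diff; what follows is a minimal, self-contained sketch of the idea using C11 atomics, with a simplified stand-in CPUState and example mask values, not QEMU's actual code:

    /*
     * Illustrative sketch only: QEMU's real helpers operate on the full
     * CPUState and its qatomic_* wrappers. The struct layout, memory
     * ordering, and mask values here are simplifying assumptions.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CPU_INTERRUPT_HARD 0x0002   /* example mask values */
    #define CPU_INTERRUPT_NMI  0x0200

    typedef struct CPUState {
        _Atomic int interrupt_request;  /* stand-in for cpu->interrupt_request */
    } CPUState;

    /* True if any bit in @mask is pending; a single atomic load, so the
     * caller does not need the BQL just to observe pending interrupts. */
    static bool cpu_test_interrupt(CPUState *cpu, int mask)
    {
        return (atomic_load_explicit(&cpu->interrupt_request,
                                     memory_order_acquire) & mask) != 0;
    }

    /* Atomically set bits in interrupt_request (the real helper would also
     * kick the vCPU out of its execution loop; omitted here). */
    static void cpu_set_interrupt(CPUState *cpu, int mask)
    {
        atomic_fetch_or_explicit(&cpu->interrupt_request, mask,
                                 memory_order_release);
    }

    int main(void)
    {
        CPUState cpu = { .interrupt_request = 0 };

        cpu_set_interrupt(&cpu, CPU_INTERRUPT_HARD);
        printf("HARD pending: %d\n", cpu_test_interrupt(&cpu, CPU_INTERRUPT_HARD));
        printf("NMI  pending: %d\n", cpu_test_interrupt(&cpu, CPU_INTERRUPT_NMI));
        return 0;
    }

Consistent with the "remove BQL usage for interrupt_request != 0" theme, the kvm.c hunks below also store cpu->exit_request with qatomic_set() and narrow the BQL in kvm_arch_pre_run() to the branch that actually consumes a PIC interrupt.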
Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/hvf/hvf.c                |  4
-rw-r--r--  target/i386/hvf/x86hvf.c             | 21
-rw-r--r--  target/i386/kvm/kvm.c                | 46
-rw-r--r--  target/i386/nvmm/nvmm-all.c          | 24
-rw-r--r--  target/i386/tcg/system/seg_helper.c  |  2
-rw-r--r--  target/i386/tcg/system/svm_helper.c  |  2
-rw-r--r--  target/i386/whpx/whpx-all.c          | 34
7 files changed, 65 insertions, 68 deletions
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 818b504..8445cad 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -773,9 +773,9 @@ int hvf_vcpu_exec(CPUState *cpu)
switch (exit_reason) {
case EXIT_REASON_HLT: {
macvm_set_rip(cpu, rip + ins_len);
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK))
- && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ && !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
ret = EXCP_HLT;
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 17fce1d..9e05e0e 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -395,7 +395,7 @@ bool hvf_inject_interrupts(CPUState *cs)
};
}
- if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -406,7 +406,7 @@ bool hvf_inject_interrupts(CPUState *cs)
}
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(env);
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
@@ -415,11 +415,10 @@ bool hvf_inject_interrupts(CPUState *cs)
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
}
- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
vmx_set_int_window_exiting(cs);
}
- return (cs->interrupt_request
- & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+ return cpu_test_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR);
}
int hvf_process_events(CPUState *cs)
@@ -432,25 +431,25 @@ int hvf_process_events(CPUState *cs)
env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
}
- if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT)) {
cpu_synchronize_state(cs);
do_cpu_init(cpu);
}
- if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
}
- if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
- if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 369626f..306430a 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -5453,8 +5453,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
int ret;
/* Inject NMI */
- if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
- if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
bql_lock();
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
bql_unlock();
@@ -5465,7 +5465,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
strerror(-ret));
}
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
bql_lock();
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
bql_unlock();
@@ -5478,31 +5478,30 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
- if (!kvm_pic_in_kernel()) {
- bql_lock();
- }
/* Force the VCPU out of its inner loop to process any INIT requests
* or (for userspace APIC, but it is cheap to combine the checks here)
* pending TPR access reports.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
- cpu->exit_request = 1;
+ qatomic_set(&cpu->exit_request, 1);
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
- cpu->exit_request = 1;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
+ qatomic_set(&cpu->exit_request, 1);
}
}
if (!kvm_pic_in_kernel()) {
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
- (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) {
int irq;
+ bql_lock();
+
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
@@ -5517,13 +5516,14 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
strerror(-ret));
}
}
+ bql_unlock();
}
/* If we have an interrupt but the guest is not ready to receive an
* interrupt, request an interrupt window exit. This will
* cause a return to userspace as soon as the guest is ready to
* receive interrupts. */
- if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
run->request_interrupt_window = 1;
} else {
run->request_interrupt_window = 0;
@@ -5531,8 +5531,6 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
DPRINTF("setting tpr\n");
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
-
- bql_unlock();
}
}
@@ -5595,7 +5593,7 @@ int kvm_arch_process_async_events(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_MCE)) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);
@@ -5618,7 +5616,7 @@ int kvm_arch_process_async_events(CPUState *cs)
}
}
- if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
kvm_cpu_synchronize_state(cs);
do_cpu_init(cpu);
@@ -5628,20 +5626,20 @@ int kvm_arch_process_async_events(CPUState *cs)
return 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
}
- if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
kvm_cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
- if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
kvm_cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -5656,9 +5654,9 @@ static int kvm_handle_halt(X86CPU *cpu)
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
- if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!(cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
- !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
cs->halted = 1;
return EXCP_HLT;
}
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 92e3b8b..c1ac74c 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -413,11 +413,11 @@ nvmm_vcpu_pre_run(CPUState *cpu)
* Force the VCPU out of its inner loop to process any INIT requests
* or commit pending TPR access.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
cpu->exit_request = 1;
}
- if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
if (nvmm_can_take_nmi(cpu)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
event->type = NVMM_VCPU_EVENT_INTR;
@@ -426,7 +426,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
}
}
- if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
if (nvmm_can_take_int(cpu)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
event->type = NVMM_VCPU_EVENT_INTR;
@@ -436,7 +436,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
}
/* Don't want SMIs. */
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
}
@@ -651,9 +651,9 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
bql_lock();
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(cpu_env(cpu)->eflags & IF_MASK)) &&
- !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->exception_index = EXCP_HLT;
cpu->halted = true;
ret = 1;
@@ -691,25 +691,25 @@ nvmm_vcpu_loop(CPUState *cpu)
* Some asynchronous events must be handled outside of the inner
* VCPU loop. They are handled here.
*/
- if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT)) {
nvmm_cpu_synchronize_state(cpu);
do_cpu_init(x86_cpu);
/* set int/nmi windows back to the reset state */
}
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(x86_cpu->apic_state);
}
- if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->halted = false;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
nvmm_cpu_synchronize_state(cpu);
do_cpu_sipi(x86_cpu);
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
nvmm_cpu_synchronize_state(cpu);
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
index d4ea890..794a23d 100644
--- a/target/i386/tcg/system/seg_helper.c
+++ b/target/i386/tcg/system/seg_helper.c
@@ -133,7 +133,7 @@ bool x86_cpu_exec_halt(CPUState *cpu)
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
bql_lock();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
diff --git a/target/i386/tcg/system/svm_helper.c b/target/i386/tcg/system/svm_helper.c
index dea039b..3569196 100644
--- a/target/i386/tcg/system/svm_helper.c
+++ b/target/i386/tcg/system/svm_helper.c
@@ -403,7 +403,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->hflags2 |= HF2_GIF_MASK;
if (ctl_has_irq(env)) {
- cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ cpu_set_interrupt(cs, CPU_INTERRUPT_VIRQ);
}
if (virtual_gif_set(env)) {
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index b72dcff..878cdd1 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -1436,9 +1436,9 @@ static int whpx_handle_halt(CPUState *cpu)
int ret = 0;
bql_lock();
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(cpu_env(cpu)->eflags & IF_MASK)) &&
- !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->exception_index = EXCP_HLT;
cpu->halted = true;
ret = 1;
@@ -1469,15 +1469,15 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
/* Inject NMI */
if (!vcpu->interruption_pending &&
- cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
- if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
vcpu->interruptable = false;
new_int.InterruptionType = WHvX64PendingNmi;
new_int.InterruptionPending = 1;
new_int.InterruptionVector = 2;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
}
}
@@ -1486,12 +1486,12 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
* Force the VCPU out of its inner loop to process any INIT requests or
* commit pending TPR access.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
cpu->exit_request = 1;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
cpu->exit_request = 1;
}
}
@@ -1501,7 +1501,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (!vcpu->interruption_pending &&
vcpu->interruptable && (env->eflags & IF_MASK)) {
assert(!new_int.InterruptionPending);
- if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
@@ -1519,7 +1519,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
reg_count += 1;
}
} else if (vcpu->ready_for_pic_interrupt &&
- (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
@@ -1546,7 +1546,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
/* Update the state of the interrupt delivery notification */
if (!vcpu->window_registered &&
- cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
reg_values[reg_count].DeliverabilityNotifications =
(WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) {
.InterruptNotification = 1
@@ -1599,30 +1599,30 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
CPUX86State *env = &x86_cpu->env;
AccelCPUState *vcpu = cpu->accel;
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
whpx_cpu_synchronize_state(cpu);
do_cpu_init(x86_cpu);
vcpu->interruptable = true;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(x86_cpu->apic_state);
}
- if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->halted = false;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
whpx_cpu_synchronize_state(cpu);
do_cpu_sipi(x86_cpu);
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
whpx_cpu_synchronize_state(cpu);
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,