| author | Alexander Graf <agraf@csgraf.de> | 2021-06-03 14:09:34 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2021-06-03 16:43:27 +0100 |
| commit | b533450e74500dd67f0aa49775809ea33bc465b7 | |
| tree | ed4a54d0c402c554796ffaac9b7418848d53469f /target/i386/hvf/x86.c | |
| parent | d662ede2b1eb033883b7c96866e84e8b54524ccb | |
hvf: Introduce hvf vcpu struct
We will need more than a single field for hvf going forward. To keep
the global vcpu struct uncluttered, let's allocate a special hvf vcpu
struct, similar to how hax does it.
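
Concretely, the change has roughly the following shape. This is a minimal sketch inferred from the `cpu->hvf->fd` accesses in the diff below, not the verbatim QEMU headers; the real definitions live in the hvf and CPUState headers and may differ in detail:

```c
/* Sketch only: field names inferred from the accesses in this patch. */

/* Per-vcpu hvf state gets its own struct, so future hvf-only fields
 * no longer have to live in the global CPUState. */
struct hvf_vcpu_state {
    int fd;    /* Hypervisor.framework vcpu handle, formerly CPUState::hvf_fd */
    /* more hvf-specific per-vcpu fields can be added here */
};

struct CPUState {
    /* ... existing, accelerator-independent fields ... */
    struct hvf_vcpu_state *hvf;    /* allocated when the hvf vcpu is set up */
};
```

With that indirection in place, every call site changes mechanically from `rvmcs(cpu->hvf_fd, ...)` to `rvmcs(cpu->hvf->fd, ...)`, which is all the hunks below do.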
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210519202253.76782-12-agraf@csgraf.de
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/i386/hvf/x86.c')
-rw-r--r-- | target/i386/hvf/x86.c | 28 |
1 file changed, 14 insertions, 14 deletions
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
index cd04518..2898bb7 100644
--- a/target/i386/hvf/x86.c
+++ b/target/i386/hvf/x86.c
@@ -62,11 +62,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu,
     }
 
     if (GDT_SEL == sel.ti) {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
     } else {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
     }
 
     if (sel.index * 8 >= limit) {
@@ -85,11 +85,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
     uint32_t limit;
 
     if (GDT_SEL == sel.ti) {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
     } else {
-        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+        base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
     }
 
     if (sel.index * 8 >= limit) {
@@ -103,8 +103,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
 bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                         int gate)
 {
-    target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
-    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
+    target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
+    uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
 
     memset(idt_desc, 0, sizeof(*idt_desc));
     if (gate * 8 >= limit) {
@@ -118,7 +118,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
 
 bool x86_is_protected(struct CPUState *cpu)
 {
-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
     return cr0 & CR0_PE;
 }
 
@@ -136,7 +136,7 @@ bool x86_is_v8086(struct CPUState *cpu)
 
 bool x86_is_long_mode(struct CPUState *cpu)
 {
-    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
+    return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
 }
 
 bool x86_is_long64_mode(struct CPUState *cpu)
@@ -149,13 +149,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)
 
 bool x86_is_paging_mode(struct CPUState *cpu)
 {
-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
     return cr0 & CR0_PG;
 }
 
 bool x86_is_pae_enabled(struct CPUState *cpu)
 {
-    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
+    uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
     return cr4 & CR4_PAE;
 }