Diffstat (limited to 'target/i386')
 -rw-r--r--  target/i386/cpu.c                      23
 -rw-r--r--  target/i386/cpu.h                       8
 -rw-r--r--  target/i386/helper.c                    4
 -rw-r--r--  target/i386/kvm/kvm.c                  23
 -rw-r--r--  target/i386/monitor.c                 134
 -rw-r--r--  target/i386/tcg/decode-new.c.inc        2
 -rw-r--r--  target/i386/tcg/seg_helper.c            2
 -rw-r--r--  target/i386/tcg/system/excp_helper.c    3
 -rw-r--r--  target/i386/tcg/system/seg_helper.c     1
 -rw-r--r--  target/i386/tcg/system/smm_helper.c    10
10 files changed, 129 insertions(+), 81 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ab18de8..455caff 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1397,7 +1397,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
"no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL,
- NULL, NULL, "null-sel-clr-base", NULL,
+ NULL, "verw-clear", "null-sel-clr-base", NULL,
"auto-ibrs", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -1415,6 +1415,22 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.tcg_features = 0,
.unmigratable_flags = 0,
},
+ [FEAT_8000_0021_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, "tsa-sq-no", "tsa-l1-no", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x80000021, .reg = R_ECX, },
+ .tcg_features = 0,
+ .unmigratable_flags = 0,
+ },
[FEAT_8000_0022_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -8526,6 +8542,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax = *ebx = *ecx = *edx = 0;
*eax = env->features[FEAT_8000_0021_EAX];
*ebx = env->features[FEAT_8000_0021_EBX];
+ *ecx = env->features[FEAT_8000_0021_ECX];
break;
case 0x80000022:
*eax = *ebx = *ecx = *edx = 0;
@@ -8632,7 +8649,11 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
env->idt.limit = 0xffff;
env->gdt.limit = 0xffff;
+#if defined(CONFIG_USER_ONLY)
+ env->ldt.limit = 0;
+#else
env->ldt.limit = 0xffff;
+#endif
env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
env->tr.limit = 0xffff;
env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 8b7c173..ce94886 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -643,6 +643,7 @@ typedef enum FeatureWord {
FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
+ FEAT_8000_0021_ECX, /* CPUID[8000_0021].ECX */
FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
@@ -1103,6 +1104,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
+/* Memory form of VERW mitigates TSA */
+#define CPUID_8000_0021_EAX_VERW_CLEAR (1U << 5)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
@@ -1126,6 +1129,11 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
*/
#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)
+/* CPU is not vulnerable to the TSA-SQ attack */
+#define CPUID_8000_0021_ECX_TSA_SQ_NO (1U << 1)
+/* CPU is not vulnerable to the TSA-L1 attack */
+#define CPUID_8000_0021_ECX_TSA_L1_NO (1U << 2)
+
/* Performance Monitoring Version 2 */
#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)
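
As a quick illustration of the new leaf (not part of the patch), a guest can probe
CPUID 0x80000021 for the verw-clear, tsa-sq-no and tsa-l1-no bits defined above. The
sketch below uses GCC/Clang's <cpuid.h>; the macro names are local to the example,
not QEMU's.

    #include <stdio.h>
    #include <cpuid.h>

    /* Same bit positions as the CPUID_8000_0021_* definitions above. */
    #define TSA_EAX_VERW_CLEAR (1U << 5)
    #define TSA_ECX_SQ_NO      (1U << 1)
    #define TSA_ECX_L1_NO      (1U << 2)

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Returns 0 if the CPU does not implement leaf 0x80000021. */
        if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 0x80000021 not available");
            return 0;
        }
        printf("verw-clear: %u\n", !!(eax & TSA_EAX_VERW_CLEAR));
        printf("tsa-sq-no:  %u\n", !!(ecx & TSA_ECX_SQ_NO));
        printf("tsa-l1-no:  %u\n", !!(ecx & TSA_ECX_L1_NO));
        return 0;
    }
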
diff --git a/target/i386/helper.c b/target/i386/helper.c
index 651041c..72b2e19 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -621,6 +621,10 @@ void do_cpu_init(X86CPU *cpu)
void do_cpu_sipi(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
+ if (env->hflags & HF_SMM_MASK) {
+ return;
+ }
apic_sipi(cpu->apic_state);
}
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index db40caa..309f043 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -653,6 +653,23 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
must_be_one = (uint32_t)value;
can_be_one = (uint32_t)(value >> 32);
return can_be_one & ~must_be_one;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ /*
+ * Special handling for fb-clear bit in ARCH_CAPABILITIES MSR.
+ * KVM will only report the bit if it is enabled in the host,
+ * but, for live migration capability purposes, we want to
+ * expose the bit to the guest even if it is disabled in the
+ * host, as long as the host itself is not vulnerable to
+ * the issue that the fb-clear bit is meant to mitigate.
+ */
+ if ((value & MSR_ARCH_CAP_MDS_NO) &&
+ (value & MSR_ARCH_CAP_TAA_NO) &&
+ (value & MSR_ARCH_CAP_SBDR_SSDP_NO) &&
+ (value & MSR_ARCH_CAP_FBSDP_NO) &&
+ (value & MSR_ARCH_CAP_PSDP_NO)) {
+ value |= MSR_ARCH_CAP_FB_CLEAR;
+ }
+ return value;
default:
return value;
@@ -3907,7 +3924,7 @@ static void kvm_init_msrs(X86CPU *cpu)
assert(kvm_buf_set_msrs(cpu) == 0);
}
-static int kvm_put_msrs(X86CPU *cpu, int level)
+static int kvm_put_msrs(X86CPU *cpu, KvmPutState level)
{
CPUX86State *env = &cpu->env;
int i;
@@ -5027,7 +5044,7 @@ static int kvm_get_apic(X86CPU *cpu)
return 0;
}
-static int kvm_put_vcpu_events(X86CPU *cpu, int level)
+static int kvm_put_vcpu_events(X86CPU *cpu, KvmPutState level)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
@@ -5270,7 +5287,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
return ret;
}
-int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp)
+int kvm_arch_put_registers(CPUState *cpu, KvmPutState level, Error **errp)
{
X86CPU *x86_cpu = X86_CPU(cpu);
int ret;
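
The ARCH_CAPABILITIES special case added above condenses to a single check: if the
host already reports itself as not vulnerable to every issue that FB_CLEAR mitigates,
the bit can be exposed to the guest regardless of whether the host has it enabled.
A standalone sketch of that derivation (not QEMU code; bit positions follow the Intel
SDM / Linux msr-index.h and should be double-checked before reuse):

    #include <stdint.h>

    #define ARCH_CAP_MDS_NO        (1ULL << 5)
    #define ARCH_CAP_TAA_NO        (1ULL << 8)
    #define ARCH_CAP_SBDR_SSDP_NO  (1ULL << 13)
    #define ARCH_CAP_FBSDP_NO      (1ULL << 14)
    #define ARCH_CAP_PSDP_NO       (1ULL << 15)
    #define ARCH_CAP_FB_CLEAR      (1ULL << 17)

    /* Mirrors the fb-clear handling in the hunk above. */
    static uint64_t arch_caps_for_guest(uint64_t host_value)
    {
        const uint64_t not_vulnerable = ARCH_CAP_MDS_NO | ARCH_CAP_TAA_NO |
                                        ARCH_CAP_SBDR_SSDP_NO |
                                        ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO;

        if ((host_value & not_vulnerable) == not_vulnerable) {
            host_value |= ARCH_CAP_FB_CLEAR;
        }
        return host_value;
    }
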
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 3c9b6ca..d2bb873 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -30,6 +30,7 @@
#include "qobject/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
+#include "system/memory.h"
/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
@@ -68,23 +69,23 @@ static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
pte & PG_RW_MASK ? 'W' : '-');
}
-static void tlb_info_32(Monitor *mon, CPUArchState *env)
+static void tlb_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
uint32_t pgd, pde, pte;
pgd = env->cr[3] & ~0xfff;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
/* 4M pages */
print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 22) + (l2 << 12),
pte & ~PG_PSE_MASK,
@@ -96,21 +97,20 @@ static void tlb_info_32(Monitor *mon, CPUArchState *env)
}
}
-static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
uint64_t pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
pdp_addr = env->cr[3] & ~0x1f;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
/* 2M pages with PAE, CR4.PSE is ignored */
@@ -119,8 +119,8 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 30) + (l2 << 21)
+ (l3 << 12),
@@ -136,24 +136,23 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
}
#ifdef TARGET_X86_64
-static void tlb_info_la48(Monitor *mon, CPUArchState *env,
+static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
uint64_t l0, uint64_t pml4_addr)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
if (!(pml4e & PG_PRESENT_MASK)) {
continue;
}
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
if (!(pdpe & PG_PRESENT_MASK)) {
continue;
}
@@ -167,8 +166,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8, attrs, NULL);
if (!(pde & PG_PRESENT_MASK)) {
continue;
}
@@ -182,10 +180,8 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l0 << 48) + (l1 << 39) +
(l2 << 30) + (l3 << 21) + (l4 << 12),
@@ -197,18 +193,18 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
}
}
-static void tlb_info_la57(Monitor *mon, CPUArchState *env)
+static void tlb_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l0;
uint64_t pml5e;
uint64_t pml5_addr;
pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
if (pml5e & PG_PRESENT_MASK) {
- tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
+ tlb_info_la48(mon, env, as, l0, pml5e & 0x3fffffffff000ULL);
}
}
}
@@ -217,6 +213,7 @@ static void tlb_info_la57(Monitor *mon, CPUArchState *env)
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
CPUArchState *env;
+ AddressSpace *as;
env = mon_get_cpu_env(mon);
if (!env) {
@@ -228,21 +225,22 @@ void hmp_info_tlb(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "PG disabled\n");
return;
}
+ as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
- tlb_info_la57(mon, env);
+ tlb_info_la57(mon, env, as);
} else {
- tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
+ tlb_info_la48(mon, env, as, 0, env->cr[3] & 0x3fffffffff000ULL);
}
} else
#endif
{
- tlb_info_pae32(mon, env);
+ tlb_info_pae32(mon, env, as);
}
} else {
- tlb_info_32(mon, env);
+ tlb_info_32(mon, env, as);
}
}
@@ -271,8 +269,9 @@ static void mem_print(Monitor *mon, CPUArchState *env,
}
}
-static void mem_info_32(Monitor *mon, CPUArchState *env)
+static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
int prot, last_prot;
uint32_t pgd, pde, pte;
@@ -282,8 +281,7 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
end = l1 << 22;
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -291,8 +289,8 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, end, prot);
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
end = (l1 << 22) + (l2 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde &
@@ -312,8 +310,9 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
-static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
int prot, last_prot;
uint64_t pdpe, pde, pte;
@@ -324,14 +323,12 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
end = l1 << 30;
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
end = (l1 << 30) + (l2 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -341,8 +338,8 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 30) + (l2 << 21) + (l3 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
@@ -369,8 +366,9 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
#ifdef TARGET_X86_64
-static void mem_info_la48(Monitor *mon, CPUArchState *env)
+static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
@@ -380,14 +378,12 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = l1 << 39;
if (pml4e & PG_PRESENT_MASK) {
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
if (pdpe & PG_PSE_MASK) {
@@ -398,8 +394,8 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
} else {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -411,10 +407,10 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as,
+ pt_addr
+ + l4 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -449,8 +445,9 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
-static void mem_info_la57(Monitor *mon, CPUArchState *env)
+static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l0, l1, l2, l3, l4;
uint64_t pml5e, pml4e, pdpe, pde, pte;
@@ -460,8 +457,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
end = l0 << 48;
if (!(pml5e & PG_PRESENT_MASK)) {
prot = 0;
@@ -471,8 +467,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pml4_addr = pml5e & 0x3fffffffff000ULL;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39);
if (!(pml4e & PG_PRESENT_MASK)) {
prot = 0;
@@ -482,8 +477,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
prot = 0;
@@ -501,8 +495,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
prot = 0;
@@ -520,8 +514,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -545,6 +539,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
CPUArchState *env;
+ AddressSpace *as;
env = mon_get_cpu_env(mon);
if (!env) {
@@ -556,21 +551,22 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "PG disabled\n");
return;
}
+ as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
- mem_info_la57(mon, env);
+ mem_info_la57(mon, env, as);
} else {
- mem_info_la48(mon, env);
+ mem_info_la48(mon, env, as);
}
} else
#endif
{
- mem_info_pae32(mon, env);
+ mem_info_pae32(mon, env, as);
}
} else {
- mem_info_32(mon, env);
+ mem_info_32(mon, env, as);
}
}
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 5103865..a50f57d 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -1541,7 +1541,7 @@ static void decode_group4_5(DisasContext *s, CPUX86State *env, X86OpEntry *entry
[0x0b] = X86_OP_ENTRYr(CALLF_m, M,p),
[0x0c] = X86_OP_ENTRYr(JMP_m, E,f64, zextT0),
[0x0d] = X86_OP_ENTRYr(JMPF_m, M,p),
- [0x0e] = X86_OP_ENTRYr(PUSH, E,f64),
+ [0x0e] = X86_OP_ENTRYr(PUSH, E,d64),
};
int w = (*b & 1);
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index 071f3fb..f49fe85 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -456,7 +456,7 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,
new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
}
new_ldt = access_ldw(&new, tss_base + 0x60);
- new_trap = access_ldl(&new, tss_base + 0x64);
+ new_trap = access_ldw(&new, tss_base + 0x64) & 1;
} else {
/* 16 bit */
new_cr3 = 0;
diff --git a/target/i386/tcg/system/excp_helper.c b/target/i386/tcg/system/excp_helper.c
index 50040f6..f622b5d 100644
--- a/target/i386/tcg/system/excp_helper.c
+++ b/target/i386/tcg/system/excp_helper.c
@@ -592,7 +592,8 @@ static bool get_physical_address(CPUX86State *env, vaddr addr,
if (sext != 0 && sext != -1) {
*err = (TranslateFault){
.exception_index = EXCP0D_GPF,
- .cr2 = addr,
+ /* non-canonical #GP doesn't change CR2 */
+ .cr2 = env->cr[2],
};
return false;
}
diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
index 38072e5..8c7856b 100644
--- a/target/i386/tcg/system/seg_helper.c
+++ b/target/i386/tcg/system/seg_helper.c
@@ -182,6 +182,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
apic_poll_irq(cpu->apic_state);
break;
case CPU_INTERRUPT_SIPI:
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_SIPI);
do_cpu_sipi(cpu);
break;
case CPU_INTERRUPT_SMI:
diff --git a/target/i386/tcg/system/smm_helper.c b/target/i386/tcg/system/smm_helper.c
index 251eb78..fb028a8 100644
--- a/target/i386/tcg/system/smm_helper.c
+++ b/target/i386/tcg/system/smm_helper.c
@@ -168,7 +168,7 @@ void do_smm_enter(X86CPU *cpu)
env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
CR0_PG_MASK));
cpu_x86_update_cr4(env, 0);
- env->dr[7] = 0x00000400;
+ helper_set_dr(env, 7, 0x00000400);
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
0xffffffff,
@@ -233,8 +233,8 @@ void helper_rsm(CPUX86State *env)
env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
- env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
+ helper_set_dr(env, 6, x86_ldl_phys(cs, sm_state + 0x7f68));
+ helper_set_dr(env, 7, x86_ldl_phys(cs, sm_state + 0x7f60));
cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
@@ -268,8 +268,8 @@ void helper_rsm(CPUX86State *env)
env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
- env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
- env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+ helper_set_dr(env, 6, x86_ldl_phys(cs, sm_state + 0x7fcc));
+ helper_set_dr(env, 7, x86_ldl_phys(cs, sm_state + 0x7fc8));
env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);