author     Richard Henderson <richard.henderson@linaro.org>  2024-07-17 15:40:28 +1000
committer  Richard Henderson <richard.henderson@linaro.org>  2024-07-17 15:40:28 +1000
commit     58ee924b97d1c0898555647a31820c5a20d55a73 (patch)
tree       e4e65183458220e401bca4dde85236c6cb04a9ec /target
parent     e2f346aa98646e84eabe0256f89d08e89b1837cf (diff)
parent     6a079f2e68e1832ebca0e7d64bc31ffebde9b2dd (diff)
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
* target/i386/tcg: fixes for seg_helper.c
* SEV: Don't allow automatic fallback to legacy KVM_SEV_INIT, but also don't use it by default
* scsi: honor bootindex again for legacy drives
* hpet, utils, scsi, build, cpu: miscellaneous bugfixes

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmaWoP0UHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroOqfggAg3jxUp6B8dFTEid5aV6qvT4M6nwD
# TAYcAl5kRqTOklEmXiPCoA5PeS0rbr+5xzWLAKgkumjCVXbxMoYSr0xJHVuDwQWv
# XunUm4kpxJBLKK3uTGAIW9A21thOaA5eAoLIcqu2smBMU953TBevMqA7T67h22rp
# y8NnZWWdyQRH0RAaWsCBaHVkkf+DuHSG5LHMYhkdyxzno+UWkTADFppVhaDO78Ba
# Egk49oMO+G6of4+dY//p1OtAkAf4bEHePKgxnbZePInJrkgHzr0TJWf9gERWFzdK
# JiM0q6DeqopZm+vENxS+WOx7AyDzdN0qOrf6t9bziXMg0Rr2Z8bu01yBCQ==
# =cZhV
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 17 Jul 2024 02:34:05 AM AEST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu:
  target/i386/tcg: save current task state before loading new one
  target/i386/tcg: use X86Access for TSS access
  target/i386/tcg: check for correct busy state before switching to a new task
  target/i386/tcg: Compute MMU index once
  target/i386/tcg: Introduce x86_mmu_index_{kernel_,}pl
  target/i386/tcg: Reorg push/pop within seg_helper.c
  target/i386/tcg: use PUSHL/PUSHW for error code
  target/i386/tcg: Allow IRET from user mode to user mode with SMAP
  target/i386/tcg: Remove SEG_ADDL
  target/i386/tcg: fix POP to memory in long mode
  hpet: fix HPET_TN_SETVAL for high 32-bits of the comparator
  hpet: fix clamping of period
  docs: Update description of 'user=username' for '-run-with'
  qemu/timer: Add host ticks function for LoongArch
  scsi: fix regression and honor bootindex again for legacy drives
  hw/scsi/lsi53c895a: bump instruction limit in scripts processing to fix regression
  disas: Fix build against Capstone v6
  cpu: Free queued CPU work
  Revert "qemu-char: do not operate on sources from finalize callbacks"
  i386/sev: Don't allow automatic fallback to legacy KVM_SEV*_INIT

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
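Several of the seg_helper.c commits in this series (the push/pop reorg and the SEG_ADDL removal) hinge on how the emulated stack pointer is masked by the stack-segment size. The following is a minimal, self-contained sketch of that update rule in plain C, mirroring the SET_ESP macro that the diff below moves to the top of seg_helper.c; it is an illustration, not QEMU code.

#include <stdint.h>
#include <stdio.h>

/*
 * Demo of the stack-pointer update rule SET_ESP implements: only the low
 * bits selected by the stack-segment size (sp_mask) are replaced, the rest
 * of RSP is preserved.
 */
static uint64_t set_esp(uint64_t rsp, uint64_t val, uint64_t sp_mask)
{
    if (sp_mask == 0xffff) {                  /* 16-bit SS: keep upper bits */
        return (rsp & ~0xffffULL) | (val & 0xffff);
    } else if (sp_mask == 0xffffffffULL) {    /* 32-bit SS: zero-extend */
        return (uint32_t)val;
    } else {                                  /* 64-bit: take the full value */
        return val;
    }
}

int main(void)
{
    /* -> 0x1234abcd: only the low 16 bits of ESP change on a 16-bit stack */
    printf("%#llx\n", (unsigned long long)set_esp(0x12345678, 0xabcd, 0xffff));
    return 0;
}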
Diffstat (limited to 'target')
-rw-r--r--  target/i386/cpu.c                  27
-rw-r--r--  target/i386/cpu.h                  11
-rw-r--r--  target/i386/sev.c                  85
-rw-r--r--  target/i386/tcg/decode-new.c.inc    2
-rw-r--r--  target/i386/tcg/emit.c.inc          1
-rw-r--r--  target/i386/tcg/seg_helper.c      662
6 files changed, 458 insertions(+), 330 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index c05765e..4688d14 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -8122,18 +8122,39 @@ static bool x86_cpu_has_work(CPUState *cs)
return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}
-static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
+int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
{
- CPUX86State *env = cpu_env(cs);
int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
int mmu_index_base =
- (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER64_IDX :
+ pl == 3 ? MMU_USER64_IDX :
!(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
(env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
return mmu_index_base + mmu_index_32;
}
+static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ CPUX86State *env = cpu_env(cs);
+ return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
+}
+
+static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
+{
+ int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
+ int mmu_index_base =
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ (pl < 3 && (env->eflags & AC_MASK)
+ ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
+
+ return mmu_index_base + mmu_index_32;
+}
+
+int cpu_mmu_index_kernel(CPUX86State *env)
+{
+ return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
+}
+
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
X86CPU *cpu = X86_CPU(cs);
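The cpu.c hunks above split the MMU-index computation so that callers can ask for the index of an arbitrary privilege level instead of only the current CPL. A small, self-contained sketch of the selection rule follows; the index values are illustrative placeholders, not QEMU's real MMU_*_IDX enum.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values for the demo only. */
enum { USER_IDX = 0, KNOSMAP_IDX = 2, KSMAP_IDX = 4 };

/* Mirrors the rule of x86_mmu_index_pl() above: pick a base index from the
 * privilege level and the SMAP/EFLAGS.AC state, then add 1 when the CPU is
 * not running 64-bit code (the "mmu_index_32" offset). */
static int mmu_index_pl(unsigned pl, bool cs64, bool smap, bool eflags_ac)
{
    int base = pl == 3 ? USER_IDX
             : !smap ? KNOSMAP_IDX
             : eflags_ac ? KNOSMAP_IDX
             : KSMAP_IDX;
    return base + (cs64 ? 0 : 1);
}

int main(void)
{
    /* Kernel access with SMAP enabled and EFLAGS.AC clear uses the SMAP index. */
    printf("%d\n", mmu_index_pl(0, true, true, false));  /* -> 4 (KSMAP)  */
    printf("%d\n", mmu_index_pl(3, true, true, false));  /* -> 0 (USER)   */
    return 0;
}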
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c43ac01..1e121ac 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2445,15 +2445,8 @@ static inline bool is_mmu_index_32(int mmu_index)
return mmu_index & 1;
}
-static inline int cpu_mmu_index_kernel(CPUX86State *env)
-{
- int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
- int mmu_index_base =
- !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
- ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
-
- return mmu_index_base + mmu_index_32;
-}
+int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
+int cpu_mmu_index_kernel(CPUX86State *env);
#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 2ba5f51..a1157c0 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -144,7 +144,7 @@ struct SevGuestState {
uint32_t policy;
char *dh_cert_file;
char *session_file;
- bool legacy_vm_type;
+ OnOffAuto legacy_vm_type;
};
struct SevSnpGuestState {
@@ -1369,6 +1369,17 @@ sev_vm_state_change(void *opaque, bool running, RunState state)
}
}
+/*
+ * This helper is to examine sev-guest properties and determine if any options
+ * have been set which rely on the newer KVM_SEV_INIT2 interface and associated
+ * KVM VM types.
+ */
+static bool sev_init2_required(SevGuestState *sev_guest)
+{
+ /* Currently no KVM_SEV_INIT2-specific options are exposed via QEMU */
+ return false;
+}
+
static int sev_kvm_type(X86ConfidentialGuest *cg)
{
SevCommonState *sev_common = SEV_COMMON(cg);
@@ -1379,14 +1390,39 @@ static int sev_kvm_type(X86ConfidentialGuest *cg)
goto out;
}
+ /* These are the only cases where legacy VM types can be used. */
+ if (sev_guest->legacy_vm_type == ON_OFF_AUTO_ON ||
+ (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO &&
+ !sev_init2_required(sev_guest))) {
+ sev_common->kvm_type = KVM_X86_DEFAULT_VM;
+ goto out;
+ }
+
+ /*
+ * Newer VM types are required, either explicitly via legacy-vm-type=on, or
+ * implicitly via legacy-vm-type=auto along with additional sev-guest
+ * properties that require the newer VM types.
+ */
kvm_type = (sev_guest->policy & SEV_POLICY_ES) ?
KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
- if (kvm_is_vm_type_supported(kvm_type) && !sev_guest->legacy_vm_type) {
- sev_common->kvm_type = kvm_type;
- } else {
- sev_common->kvm_type = KVM_X86_DEFAULT_VM;
+ if (!kvm_is_vm_type_supported(kvm_type)) {
+ if (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO) {
+ error_report("SEV: host kernel does not support requested %s VM type, which is required "
+ "for the set of options specified. To allow use of the legacy "
+ "KVM_X86_DEFAULT_VM VM type, please disable any options that are not "
+ "compatible with the legacy VM type, or upgrade your kernel.",
+ kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
+ } else {
+ error_report("SEV: host kernel does not support requested %s VM type. To allow use of "
+ "the legacy KVM_X86_DEFAULT_VM VM type, the 'legacy-vm-type' argument "
+ "must be set to 'on' or 'auto' for the sev-guest object.",
+ kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
+ }
+
+ return -1;
}
+ sev_common->kvm_type = kvm_type;
out:
return sev_common->kvm_type;
}
@@ -1477,14 +1513,24 @@ static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
}
trace_kvm_sev_init();
- if (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common)) == KVM_X86_DEFAULT_VM) {
+ switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) {
+ case KVM_X86_DEFAULT_VM:
cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;
ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
- } else {
+ break;
+ case KVM_X86_SEV_VM:
+ case KVM_X86_SEV_ES_VM:
+ case KVM_X86_SNP_VM: {
struct kvm_sev_init args = { 0 };
ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
+ break;
+ }
+ default:
+ error_setg(errp, "%s: host kernel does not support the requested SEV configuration.",
+ __func__);
+ return -1;
}
if (ret) {
@@ -2074,14 +2120,23 @@ sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
SEV_GUEST(obj)->session_file = g_strdup(value);
}
-static bool sev_guest_get_legacy_vm_type(Object *obj, Error **errp)
+static void sev_guest_get_legacy_vm_type(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
- return SEV_GUEST(obj)->legacy_vm_type;
+ SevGuestState *sev_guest = SEV_GUEST(obj);
+ OnOffAuto legacy_vm_type = sev_guest->legacy_vm_type;
+
+ visit_type_OnOffAuto(v, name, &legacy_vm_type, errp);
}
-static void sev_guest_set_legacy_vm_type(Object *obj, bool value, Error **errp)
+static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
{
- SEV_GUEST(obj)->legacy_vm_type = value;
+ SevGuestState *sev_guest = SEV_GUEST(obj);
+
+ visit_type_OnOffAuto(v, name, &sev_guest->legacy_vm_type, errp);
}
static void
@@ -2107,9 +2162,9 @@ sev_guest_class_init(ObjectClass *oc, void *data)
sev_guest_set_session_file);
object_class_property_set_description(oc, "session-file",
"guest owners session parameters (encoded with base64)");
- object_class_property_add_bool(oc, "legacy-vm-type",
- sev_guest_get_legacy_vm_type,
- sev_guest_set_legacy_vm_type);
+ object_class_property_add(oc, "legacy-vm-type", "OnOffAuto",
+ sev_guest_get_legacy_vm_type,
+ sev_guest_set_legacy_vm_type, NULL, NULL);
object_class_property_set_description(oc, "legacy-vm-type",
"use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions.");
}
@@ -2125,6 +2180,8 @@ sev_guest_instance_init(Object *obj)
object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
OBJ_PROP_FLAG_READWRITE);
object_apply_compat_props(obj);
+
+ sev_guest->legacy_vm_type = ON_OFF_AUTO_AUTO;
}
/* guest info specific sev/sev-es */
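The sev.c changes above turn legacy-vm-type into an on/off/auto tri-state and refuse to fall back silently to the legacy KVM_X86_DEFAULT_VM type when a newer VM type is required but unavailable. Below is a self-contained sketch of that decision; the enum and function names are demo stand-ins, not the QEMU or KVM API.

#include <stdbool.h>
#include <stdio.h>

typedef enum { AUTO, ON, OFF } OnOffAutoDemo;   /* stand-in for QEMU's OnOffAuto */

/*
 * Sketch of the decision sev_kvm_type() makes after this patch:
 *  - legacy-vm-type=on, or =auto with no KVM_SEV_INIT2-only options, keeps
 *    the legacy KVM_X86_DEFAULT_VM type;
 *  - otherwise a newer SEV/SEV-ES VM type is required, and if the host
 *    kernel does not support it, setup fails instead of silently falling
 *    back to the legacy type.
 */
static int pick_vm_type(OnOffAutoDemo legacy, bool init2_required,
                        bool new_type_supported)
{
    if (legacy == ON || (legacy == AUTO && !init2_required)) {
        return 0;                        /* legacy KVM_X86_DEFAULT_VM */
    }
    if (!new_type_supported) {
        fprintf(stderr, "host kernel lacks the requested SEV VM type\n");
        return -1;                       /* hard error, no automatic fallback */
    }
    return 1;                            /* newer SEV / SEV-ES VM type */
}

int main(void)
{
    printf("%d\n", pick_vm_type(AUTO, false, false));  /* -> 0, legacy path */
    printf("%d\n", pick_vm_type(OFF, false, false));   /* -> -1, hard error */
    return 0;
}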
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 0d846c3..d2da1d3 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -1717,7 +1717,7 @@ static const X86OpEntry opcodes_root[256] = {
[0x8C] = X86_OP_ENTRYwr(MOV, E,v, S,w, op0_Mw),
[0x8D] = X86_OP_ENTRYwr(LEA, G,v, M,v, nolea),
[0x8E] = X86_OP_ENTRYwr(MOV, S,w, E,w),
- [0x8F] = X86_OP_GROUPw(group1A, E,v),
+ [0x8F] = X86_OP_GROUPw(group1A, E,d64),
[0x98] = X86_OP_ENTRY1(CBW, 0,v), /* rAX */
[0x99] = X86_OP_ENTRYwr(CWD, 2,v, 0,v), /* rDX, rAX */
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index fc74778..016dce8 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -2788,6 +2788,7 @@ static void gen_POP(DisasContext *s, X86DecodedInsn *decode)
X86DecodedOp *op = &decode->op[0];
MemOp ot = gen_pop_T0(s);
+ assert(ot >= op->ot);
if (op->has_ea || op->unit == X86_OP_SEG) {
/* NOTE: order is important for MMU exceptions */
gen_writeback(s, decode, 0, s->T0);
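The seg_helper.c diff below replaces the PUSHW/PUSHL/POPW/POPL macros with a StackAccess struct plus small push/pop helpers that apply the stack-pointer mask on every access. The following self-contained sketch shows the shape of that pattern; the "memory" here is a plain byte array and there is no MMU index, so it is an illustration of the idea, not QEMU's implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint8_t *ss_base;      /* start of the emulated stack segment */
    uint32_t sp;           /* current stack pointer */
    uint32_t sp_mask;      /* 0xffff for a 16-bit stack, 0xffffffff for 32-bit */
} DemoStack;

static void pushw(DemoStack *sa, uint16_t val)
{
    sa->sp -= 2;
    memcpy(sa->ss_base + (sa->sp & sa->sp_mask), &val, 2);
}

static uint16_t popw(DemoStack *sa)
{
    uint16_t val;
    memcpy(&val, sa->ss_base + (sa->sp & sa->sp_mask), 2);
    sa->sp += 2;
    return val;
}

int main(void)
{
    uint8_t mem[0x10000] = { 0 };
    DemoStack sa = { .ss_base = mem, .sp = 0x100, .sp_mask = 0xffff };

    pushw(&sa, 0x1234);
    printf("%#x\n", popw(&sa));   /* -> 0x1234 */
    return 0;
}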
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index aee3d19..aac092a 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -27,6 +27,70 @@
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
+#include "access.h"
+
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask) \
+ do { \
+ if ((sp_mask) == 0xffff) { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
+ ((val) & 0xffff); \
+ } else if ((sp_mask) == 0xffffffffLL) { \
+ env->regs[R_ESP] = (uint32_t)(val); \
+ } else { \
+ env->regs[R_ESP] = (val); \
+ } \
+ } while (0)
+#else
+#define SET_ESP(val, sp_mask) \
+ do { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
+ ((val) & (sp_mask)); \
+ } while (0)
+#endif
+
+/* XXX: use mmu_index to have proper DPL support */
+typedef struct StackAccess
+{
+ CPUX86State *env;
+ uintptr_t ra;
+ target_ulong ss_base;
+ target_ulong sp;
+ target_ulong sp_mask;
+ int mmu_index;
+} StackAccess;
+
+static void pushw(StackAccess *sa, uint16_t val)
+{
+ sa->sp -= 2;
+ cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
+ val, sa->mmu_index, sa->ra);
+}
+
+static void pushl(StackAccess *sa, uint32_t val)
+{
+ sa->sp -= 4;
+ cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
+ val, sa->mmu_index, sa->ra);
+}
+
+static uint16_t popw(StackAccess *sa)
+{
+ uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
+ sa->ss_base + (sa->sp & sa->sp_mask),
+ sa->mmu_index, sa->ra);
+ sa->sp += 2;
+ return ret;
+}
+
+static uint32_t popl(StackAccess *sa)
+{
+ uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
+ sa->ss_base + (sa->sp & sa->sp_mask),
+ sa->mmu_index, sa->ra);
+ sa->sp += 4;
+ return ret;
+}
int get_pg_mode(CPUX86State *env)
{
@@ -250,14 +314,15 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip, uintptr_t retaddr)
{
- int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
+ int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
target_ulong tss_base;
uint32_t new_regs[8], new_segs[6];
uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
uint32_t old_eflags, eflags_mask;
SegmentCache *dt;
- int index;
+ int mmu_index, index;
target_ulong ptr;
+ X86Access old, new;
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
@@ -306,35 +371,86 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
old_tss_limit_max = 43;
}
+ /* new TSS must be busy iff the source is an IRET instruction */
+ if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+ }
+
+ /* X86Access avoids memory exceptions during the task switch */
+ mmu_index = cpu_mmu_index_kernel(env);
+ access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
+ MMU_DATA_STORE, mmu_index, retaddr);
+
+ if (source == SWITCH_TSS_CALL) {
+ /* Probe for future write of parent task */
+ probe_access(env, tss_base, 2, MMU_DATA_STORE,
+ mmu_index, retaddr);
+ }
+ access_prepare_mmu(&new, env, tss_base, tss_limit,
+ MMU_DATA_LOAD, mmu_index, retaddr);
+
+ /* save the current state in the old TSS */
+ old_eflags = cpu_compute_eflags(env);
+ if (old_type & 8) {
+ /* 32 bit */
+ access_stl(&old, env->tr.base + 0x20, next_eip);
+ access_stl(&old, env->tr.base + 0x24, old_eflags);
+ access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
+ access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
+ access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
+ access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
+ access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
+ access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
+ access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
+ access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
+ for (i = 0; i < 6; i++) {
+ access_stw(&old, env->tr.base + (0x48 + i * 4),
+ env->segs[i].selector);
+ }
+ } else {
+ /* 16 bit */
+ access_stw(&old, env->tr.base + 0x0e, next_eip);
+ access_stw(&old, env->tr.base + 0x10, old_eflags);
+ access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
+ access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
+ access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
+ access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
+ access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
+ access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
+ access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
+ access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
+ for (i = 0; i < 4; i++) {
+ access_stw(&old, env->tr.base + (0x22 + i * 2),
+ env->segs[i].selector);
+ }
+ }
+
/* read all the registers from the new TSS */
if (type & 8) {
/* 32 bit */
- new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
- new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
- new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
+ new_cr3 = access_ldl(&new, tss_base + 0x1c);
+ new_eip = access_ldl(&new, tss_base + 0x20);
+ new_eflags = access_ldl(&new, tss_base + 0x24);
for (i = 0; i < 8; i++) {
- new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
- retaddr);
+ new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
}
for (i = 0; i < 6; i++) {
- new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
- retaddr);
+ new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
}
- new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
- new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
+ new_ldt = access_ldw(&new, tss_base + 0x60);
+ new_trap = access_ldl(&new, tss_base + 0x64);
} else {
/* 16 bit */
new_cr3 = 0;
- new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
- new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
+ new_eip = access_ldw(&new, tss_base + 0x0e);
+ new_eflags = access_ldw(&new, tss_base + 0x10);
for (i = 0; i < 8; i++) {
- new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
+ new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
}
for (i = 0; i < 4; i++) {
- new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
- retaddr);
+ new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
}
- new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
+ new_ldt = access_ldw(&new, tss_base + 0x2a);
new_segs[R_FS] = 0;
new_segs[R_GS] = 0;
new_trap = 0;
@@ -344,65 +460,26 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
(void)new_trap;
- /* NOTE: we must avoid memory exceptions during the task switch,
- so we make dummy accesses before */
- /* XXX: it can still fail in some cases, so a bigger hack is
- necessary to valid the TLB after having done the accesses */
-
- v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
- v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
- cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
- cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
-
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
tss_set_busy(env, env->tr.selector, 0, retaddr);
}
- old_eflags = cpu_compute_eflags(env);
+
if (source == SWITCH_TSS_IRET) {
old_eflags &= ~NT_MASK;
+ if (old_type & 8) {
+ access_stl(&old, env->tr.base + 0x24, old_eflags);
+ } else {
+ access_stw(&old, env->tr.base + 0x10, old_eflags);
+ }
}
- /* save the current state in the old TSS */
- if (old_type & 8) {
- /* 32 bit */
- cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
- cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
- for (i = 0; i < 6; i++) {
- cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
- env->segs[i].selector, retaddr);
- }
- } else {
- /* 16 bit */
- cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
- cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
- for (i = 0; i < 4; i++) {
- cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
- env->segs[i].selector, retaddr);
- }
- }
-
- /* now if an exception occurs, it will occurs in the next task
- context */
-
if (source == SWITCH_TSS_CALL) {
- cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
+ /*
+ * Thanks to the probe_access above, we know the first two
+ * bytes addressed by &new are writable too.
+ */
+ access_stw(&new, tss_base, env->tr.selector);
new_eflags |= NT_MASK;
}
@@ -412,7 +489,9 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
}
/* set the new CPU state */
- /* from this point, any exception which occurs can give problems */
+
+ /* now if an exception occurs, it will occur in the next task context */
+
env->cr[0] |= CR0_TS_MASK;
env->hflags |= HF_TS_MASK;
env->tr.selector = tss_selector;
@@ -559,72 +638,19 @@ int exception_has_error_code(int intno)
return 0;
}
-#ifdef TARGET_X86_64
-#define SET_ESP(val, sp_mask) \
- do { \
- if ((sp_mask) == 0xffff) { \
- env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
- ((val) & 0xffff); \
- } else if ((sp_mask) == 0xffffffffLL) { \
- env->regs[R_ESP] = (uint32_t)(val); \
- } else { \
- env->regs[R_ESP] = (val); \
- } \
- } while (0)
-#else
-#define SET_ESP(val, sp_mask) \
- do { \
- env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
- ((val) & (sp_mask)); \
- } while (0)
-#endif
-
-/* in 64-bit machines, this can overflow. So this segment addition macro
- * can be used to trim the value to 32-bit whenever needed */
-#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
-
-/* XXX: add a is_user flag to have proper security support */
-#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
- { \
- sp -= 2; \
- cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
- }
-
-#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
- { \
- sp -= 4; \
- cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
- }
-
-#define POPW_RA(ssp, sp, sp_mask, val, ra) \
- { \
- val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
- sp += 2; \
- }
-
-#define POPL_RA(ssp, sp, sp_mask, val, ra) \
- { \
- val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
- sp += 4; \
- }
-
-#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
-#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
-#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
-#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
-
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
int error_code, unsigned int next_eip,
int is_hw)
{
SegmentCache *dt;
- target_ulong ptr, ssp;
+ target_ulong ptr;
int type, dpl, selector, ss_dpl, cpl;
int has_error_code, new_stack, shift;
- uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
- uint32_t old_eip, sp_mask, eflags;
+ uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
+ uint32_t old_eip, eflags;
int vm86 = env->eflags & VM_MASK;
+ StackAccess sa;
bool set_rf;
has_error_code = 0;
@@ -666,6 +692,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
}
+ sa.env = env;
+ sa.ra = 0;
+ sa.mmu_index = cpu_mmu_index_kernel(env);
+
if (type == 5) {
/* task gate */
/* must do that check here to return the correct error code */
@@ -674,22 +704,20 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
- uint32_t mask;
-
/* push the error code */
if (env->segs[R_SS].flags & DESC_B_MASK) {
- mask = 0xffffffff;
+ sa.sp_mask = 0xffffffff;
} else {
- mask = 0xffff;
+ sa.sp_mask = 0xffff;
}
- esp = (env->regs[R_ESP] - (2 << shift)) & mask;
- ssp = env->segs[R_SS].base + esp;
+ sa.sp = env->regs[R_ESP];
+ sa.ss_base = env->segs[R_SS].base;
if (shift) {
- cpu_stl_kernel(env, ssp, error_code);
+ pushl(&sa, error_code);
} else {
- cpu_stw_kernel(env, ssp, error_code);
+ pushw(&sa, error_code);
}
- SET_ESP(esp, mask);
+ SET_ESP(sa.sp, sa.sp_mask);
}
return;
}
@@ -723,6 +751,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
if (dpl < cpl) {
/* to inner privilege */
+ uint32_t esp;
get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
if ((ss & 0xfffc) == 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
@@ -746,17 +775,18 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
new_stack = 1;
- sp_mask = get_sp_mask(ss_e2);
- ssp = get_seg_base(ss_e1, ss_e2);
+ sa.sp = esp;
+ sa.sp_mask = get_sp_mask(ss_e2);
+ sa.ss_base = get_seg_base(ss_e1, ss_e2);
} else {
/* to same privilege */
if (vm86) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- esp = env->regs[R_ESP];
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ sa.ss_base = env->segs[R_SS].base;
}
shift = type >> 3;
@@ -781,36 +811,36 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (shift == 1) {
if (new_stack) {
if (vm86) {
- PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ pushl(&sa, env->segs[R_GS].selector);
+ pushl(&sa, env->segs[R_FS].selector);
+ pushl(&sa, env->segs[R_DS].selector);
+ pushl(&sa, env->segs[R_ES].selector);
}
- PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
+ pushl(&sa, env->segs[R_SS].selector);
+ pushl(&sa, env->regs[R_ESP]);
}
- PUSHL(ssp, esp, sp_mask, eflags);
- PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, esp, sp_mask, old_eip);
+ pushl(&sa, eflags);
+ pushl(&sa, env->segs[R_CS].selector);
+ pushl(&sa, old_eip);
if (has_error_code) {
- PUSHL(ssp, esp, sp_mask, error_code);
+ pushl(&sa, error_code);
}
} else {
if (new_stack) {
if (vm86) {
- PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ pushw(&sa, env->segs[R_GS].selector);
+ pushw(&sa, env->segs[R_FS].selector);
+ pushw(&sa, env->segs[R_DS].selector);
+ pushw(&sa, env->segs[R_ES].selector);
}
- PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
+ pushw(&sa, env->segs[R_SS].selector);
+ pushw(&sa, env->regs[R_ESP]);
}
- PUSHW(ssp, esp, sp_mask, eflags);
- PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, esp, sp_mask, old_eip);
+ pushw(&sa, eflags);
+ pushw(&sa, env->segs[R_CS].selector);
+ pushw(&sa, old_eip);
if (has_error_code) {
- PUSHW(ssp, esp, sp_mask, error_code);
+ pushw(&sa, error_code);
}
}
@@ -828,10 +858,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
}
ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
- ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
+ cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
+ get_seg_limit(ss_e1, ss_e2), ss_e2);
}
- SET_ESP(esp, sp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
@@ -843,20 +873,18 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
#ifdef TARGET_X86_64
-#define PUSHQ_RA(sp, val, ra) \
- { \
- sp -= 8; \
- cpu_stq_kernel_ra(env, sp, (val), ra); \
- }
-
-#define POPQ_RA(sp, val, ra) \
- { \
- val = cpu_ldq_kernel_ra(env, sp, ra); \
- sp += 8; \
- }
+static void pushq(StackAccess *sa, uint64_t val)
+{
+ sa->sp -= 8;
+ cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
+}
-#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
-#define POPQ(sp, val) POPQ_RA(sp, val, 0)
+static uint64_t popq(StackAccess *sa)
+{
+ uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
+ sa->sp += 8;
+ return ret;
+}
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
@@ -899,8 +927,9 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
int type, dpl, selector, cpl, ist;
int has_error_code, new_stack;
uint32_t e1, e2, e3, ss, eflags;
- target_ulong old_eip, esp, offset;
+ target_ulong old_eip, offset;
bool set_rf;
+ StackAccess sa;
has_error_code = 0;
if (!is_int && !is_hw) {
@@ -968,10 +997,16 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
if (e2 & DESC_C_MASK) {
dpl = cpl;
}
+
+ sa.env = env;
+ sa.ra = 0;
+ sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.sp_mask = -1;
+ sa.ss_base = 0;
if (dpl < cpl || ist != 0) {
/* to inner privilege */
new_stack = 1;
- esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
+ sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
ss = 0;
} else {
/* to same privilege */
@@ -979,9 +1014,9 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
- esp = env->regs[R_ESP];
+ sa.sp = env->regs[R_ESP];
}
- esp &= ~0xfLL; /* align stack */
+ sa.sp &= ~0xfLL; /* align stack */
/* See do_interrupt_protected. */
eflags = cpu_compute_eflags(env);
@@ -989,13 +1024,13 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
eflags |= RF_MASK;
}
- PUSHQ(esp, env->segs[R_SS].selector);
- PUSHQ(esp, env->regs[R_ESP]);
- PUSHQ(esp, eflags);
- PUSHQ(esp, env->segs[R_CS].selector);
- PUSHQ(esp, old_eip);
+ pushq(&sa, env->segs[R_SS].selector);
+ pushq(&sa, env->regs[R_ESP]);
+ pushq(&sa, eflags);
+ pushq(&sa, env->segs[R_CS].selector);
+ pushq(&sa, old_eip);
if (has_error_code) {
- PUSHQ(esp, error_code);
+ pushq(&sa, error_code);
}
/* interrupt gate clear IF mask */
@@ -1008,7 +1043,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
ss = 0 | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
}
- env->regs[R_ESP] = esp;
+ env->regs[R_ESP] = sa.sp;
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
@@ -1080,10 +1115,11 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
int error_code, unsigned int next_eip)
{
SegmentCache *dt;
- target_ulong ptr, ssp;
+ target_ulong ptr;
int selector;
- uint32_t offset, esp;
+ uint32_t offset;
uint32_t old_cs, old_eip;
+ StackAccess sa;
/* real mode (simpler!) */
dt = &env->idt;
@@ -1093,8 +1129,14 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
ptr = dt->base + intno * 4;
offset = cpu_lduw_kernel(env, ptr);
selector = cpu_lduw_kernel(env, ptr + 2);
- esp = env->regs[R_ESP];
- ssp = env->segs[R_SS].base;
+
+ sa.env = env;
+ sa.ra = 0;
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = 0xffff;
+ sa.ss_base = env->segs[R_SS].base;
+ sa.mmu_index = cpu_mmu_index_kernel(env);
+
if (is_int) {
old_eip = next_eip;
} else {
@@ -1102,12 +1144,12 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
}
old_cs = env->segs[R_CS].selector;
/* XXX: use SS segment size? */
- PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
- PUSHW(ssp, esp, 0xffff, old_cs);
- PUSHW(ssp, esp, 0xffff, old_eip);
+ pushw(&sa, cpu_compute_eflags(env));
+ pushw(&sa, old_cs);
+ pushw(&sa, old_eip);
/* update processor state */
- env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+ SET_ESP(sa.sp, sa.sp_mask);
env->eip = offset;
env->segs[R_CS].selector = selector;
env->segs[R_CS].base = (selector << 4);
@@ -1550,21 +1592,24 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
int shift, uint32_t next_eip)
{
- uint32_t esp, esp_mask;
- target_ulong ssp;
+ StackAccess sa;
+
+ sa.env = env;
+ sa.ra = GETPC();
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ sa.ss_base = env->segs[R_SS].base;
+ sa.mmu_index = cpu_mmu_index_kernel(env);
- esp = env->regs[R_ESP];
- esp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
if (shift) {
- PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
- PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ pushl(&sa, env->segs[R_CS].selector);
+ pushl(&sa, next_eip);
} else {
- PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
- PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ pushw(&sa, env->segs[R_CS].selector);
+ pushw(&sa, next_eip);
}
- SET_ESP(esp, esp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
env->eip = new_eip;
env->segs[R_CS].selector = new_cs;
env->segs[R_CS].base = (new_cs << 4);
@@ -1576,9 +1621,10 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
{
int new_stack, i;
uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
- uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
+ uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
uint32_t val, limit, old_sp_mask;
- target_ulong ssp, old_ssp, offset, sp;
+ target_ulong old_ssp, offset;
+ StackAccess sa;
LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
LOG_PCALL_STATE(env_cpu(env));
@@ -1590,6 +1636,11 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
cpl = env->hflags & HF_CPL_MASK;
LOG_PCALL("desc=%08x:%08x\n", e1, e2);
+
+ sa.env = env;
+ sa.ra = GETPC();
+ sa.mmu_index = cpu_mmu_index_kernel(env);
+
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK)) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
@@ -1617,14 +1668,14 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
if (shift == 2) {
- target_ulong rsp;
-
/* 64 bit case */
- rsp = env->regs[R_ESP];
- PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
- PUSHQ_RA(rsp, next_eip, GETPC());
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = -1;
+ sa.ss_base = 0;
+ pushq(&sa, env->segs[R_CS].selector);
+ pushq(&sa, next_eip);
/* from this point, not restartable */
- env->regs[R_ESP] = rsp;
+ env->regs[R_ESP] = sa.sp;
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2),
get_seg_limit(e1, e2), e2);
@@ -1632,15 +1683,15 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
} else
#endif
{
- sp = env->regs[R_ESP];
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ sa.ss_base = env->segs[R_SS].base;
if (shift) {
- PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
- PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ pushl(&sa, env->segs[R_CS].selector);
+ pushl(&sa, next_eip);
} else {
- PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
- PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ pushw(&sa, env->segs[R_CS].selector);
+ pushw(&sa, next_eip);
}
limit = get_seg_limit(e1, e2);
@@ -1648,7 +1699,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
/* from this point, not restartable */
- SET_ESP(sp, sp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
env->eip = new_eip;
@@ -1743,13 +1794,13 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
/* to inner privilege */
#ifdef TARGET_X86_64
if (shift == 2) {
- sp = get_rsp_from_tss(env, dpl);
ss = dpl; /* SS = NULL selector with RPL = new CPL */
new_stack = 1;
- sp_mask = 0;
- ssp = 0; /* SS base is always zero in IA-32e mode */
+ sa.sp = get_rsp_from_tss(env, dpl);
+ sa.sp_mask = -1;
+ sa.ss_base = 0; /* SS base is always zero in IA-32e mode */
LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
- TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
+ TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
} else
#endif
{
@@ -1758,7 +1809,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
TARGET_FMT_lx "\n", ss, sp32, param_count,
env->regs[R_ESP]);
- sp = sp32;
if ((ss & 0xfffc) == 0) {
raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
@@ -1781,63 +1831,64 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
}
- sp_mask = get_sp_mask(ss_e2);
- ssp = get_seg_base(ss_e1, ss_e2);
+ sa.sp = sp32;
+ sa.sp_mask = get_sp_mask(ss_e2);
+ sa.ss_base = get_seg_base(ss_e1, ss_e2);
}
/* push_size = ((param_count * 2) + 8) << shift; */
-
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
old_ssp = env->segs[R_SS].base;
+
#ifdef TARGET_X86_64
if (shift == 2) {
/* XXX: verify if new stack address is canonical */
- PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
- PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
+ pushq(&sa, env->segs[R_SS].selector);
+ pushq(&sa, env->regs[R_ESP]);
/* parameters aren't supported for 64-bit call gates */
} else
#endif
if (shift == 1) {
- PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
- PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ pushl(&sa, env->segs[R_SS].selector);
+ pushl(&sa, env->regs[R_ESP]);
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_ldl_kernel_ra(env, old_ssp +
- ((env->regs[R_ESP] + i * 4) &
- old_sp_mask), GETPC());
- PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
+ val = cpu_ldl_data_ra(env,
+ old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
+ GETPC());
+ pushl(&sa, val);
}
} else {
- PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
- PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ pushw(&sa, env->segs[R_SS].selector);
+ pushw(&sa, env->regs[R_ESP]);
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_lduw_kernel_ra(env, old_ssp +
- ((env->regs[R_ESP] + i * 2) &
- old_sp_mask), GETPC());
- PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
+ val = cpu_lduw_data_ra(env,
+ old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
+ GETPC());
+ pushw(&sa, val);
}
}
new_stack = 1;
} else {
/* to same privilege */
- sp = env->regs[R_ESP];
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
+ sa.sp = env->regs[R_ESP];
+ sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ sa.ss_base = env->segs[R_SS].base;
/* push_size = (4 << shift); */
new_stack = 0;
}
#ifdef TARGET_X86_64
if (shift == 2) {
- PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
- PUSHQ_RA(sp, next_eip, GETPC());
+ pushq(&sa, env->segs[R_CS].selector);
+ pushq(&sa, next_eip);
} else
#endif
if (shift == 1) {
- PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
- PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ pushl(&sa, env->segs[R_CS].selector);
+ pushl(&sa, next_eip);
} else {
- PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
- PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ pushw(&sa, env->segs[R_CS].selector);
+ pushw(&sa, next_eip);
}
/* from this point, not restartable */
@@ -1851,7 +1902,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
{
ss = (ss & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss,
- ssp,
+ sa.ss_base,
get_seg_limit(ss_e1, ss_e2),
ss_e2);
}
@@ -1862,7 +1913,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- SET_ESP(sp, sp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
env->eip = offset;
}
}
@@ -1870,26 +1921,29 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
- uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
- target_ulong ssp;
+ uint32_t new_cs, new_eip, new_eflags;
int eflags_mask;
+ StackAccess sa;
+
+ sa.env = env;
+ sa.ra = GETPC();
+ sa.mmu_index = x86_mmu_index_pl(env, 0);
+ sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
+ sa.sp = env->regs[R_ESP];
+ sa.ss_base = env->segs[R_SS].base;
- sp_mask = 0xffff; /* XXXX: use SS segment size? */
- sp = env->regs[R_ESP];
- ssp = env->segs[R_SS].base;
if (shift == 1) {
/* 32 bits */
- POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
- POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
- new_cs &= 0xffff;
- POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ new_eip = popl(&sa);
+ new_cs = popl(&sa) & 0xffff;
+ new_eflags = popl(&sa);
} else {
/* 16 bits */
- POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
- POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
- POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ new_eip = popw(&sa);
+ new_cs = popw(&sa);
+ new_eflags = popw(&sa);
}
- env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
env->segs[R_CS].selector = new_cs;
env->segs[R_CS].base = (new_cs << 4);
env->eip = new_eip;
@@ -1942,47 +1996,52 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
uint32_t new_es, new_ds, new_fs, new_gs;
uint32_t e1, e2, ss_e1, ss_e2;
int cpl, dpl, rpl, eflags_mask, iopl;
- target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+ target_ulong new_eip, new_esp;
+ StackAccess sa;
+
+ cpl = env->hflags & HF_CPL_MASK;
+
+ sa.env = env;
+ sa.ra = retaddr;
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
#ifdef TARGET_X86_64
if (shift == 2) {
- sp_mask = -1;
+ sa.sp_mask = -1;
} else
#endif
{
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
}
- sp = env->regs[R_ESP];
- ssp = env->segs[R_SS].base;
+ sa.sp = env->regs[R_ESP];
+ sa.ss_base = env->segs[R_SS].base;
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
if (shift == 2) {
- POPQ_RA(sp, new_eip, retaddr);
- POPQ_RA(sp, new_cs, retaddr);
- new_cs &= 0xffff;
+ new_eip = popq(&sa);
+ new_cs = popq(&sa) & 0xffff;
if (is_iret) {
- POPQ_RA(sp, new_eflags, retaddr);
+ new_eflags = popq(&sa);
}
} else
#endif
{
if (shift == 1) {
/* 32 bits */
- POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
- new_cs &= 0xffff;
+ new_eip = popl(&sa);
+ new_cs = popl(&sa) & 0xffff;
if (is_iret) {
- POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ new_eflags = popl(&sa);
if (new_eflags & VM_MASK) {
goto return_to_vm86;
}
}
} else {
/* 16 bits */
- POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
- POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
+ new_eip = popw(&sa);
+ new_cs = popw(&sa);
if (is_iret) {
- POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ new_eflags = popw(&sa);
}
}
}
@@ -1999,7 +2058,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
!(e2 & DESC_CS_MASK)) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
}
- cpl = env->hflags & HF_CPL_MASK;
rpl = new_cs & 3;
if (rpl < cpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
@@ -2018,7 +2076,7 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
}
- sp += addend;
+ sa.sp += addend;
if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
((env->hflags & HF_CS64_MASK) && !is_iret))) {
/* return to same privilege level */
@@ -2030,21 +2088,19 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
/* return to different privilege level */
#ifdef TARGET_X86_64
if (shift == 2) {
- POPQ_RA(sp, new_esp, retaddr);
- POPQ_RA(sp, new_ss, retaddr);
- new_ss &= 0xffff;
+ new_esp = popq(&sa);
+ new_ss = popq(&sa) & 0xffff;
} else
#endif
{
if (shift == 1) {
/* 32 bits */
- POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
- new_ss &= 0xffff;
+ new_esp = popl(&sa);
+ new_ss = popl(&sa) & 0xffff;
} else {
/* 16 bits */
- POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
- POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ new_esp = popw(&sa);
+ new_ss = popw(&sa);
}
}
LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
@@ -2094,14 +2150,14 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- sp = new_esp;
+ sa.sp = new_esp;
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
- sp_mask = -1;
+ sa.sp_mask = -1;
} else
#endif
{
- sp_mask = get_sp_mask(ss_e2);
+ sa.sp_mask = get_sp_mask(ss_e2);
}
/* validate data segments */
@@ -2110,9 +2166,9 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
validate_seg(env, R_FS, rpl);
validate_seg(env, R_GS, rpl);
- sp += addend;
+ sa.sp += addend;
}
- SET_ESP(sp, sp_mask);
+ SET_ESP(sa.sp, sa.sp_mask);
env->eip = new_eip;
if (is_iret) {
/* NOTE: 'cpl' is the _old_ CPL */
@@ -2132,12 +2188,12 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
return;
return_to_vm86:
- POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
- POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
+ new_esp = popl(&sa);
+ new_ss = popl(&sa);
+ new_es = popl(&sa);
+ new_ds = popl(&sa);
+ new_fs = popl(&sa);
+ new_gs = popl(&sa);
/* modify processor state */
cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |