author     Blue Swirl <blauwirbel@gmail.com>    2012-04-29 19:47:06 +0000
committer  Blue Swirl <blauwirbel@gmail.com>    2012-08-14 19:01:26 +0000
commit     2999a0b20074a7e4a58f56572bb1436749368f59 (patch)
tree       d3aa76ce12c4349cd53b11ac97df2930917f3460 /target-i386
parent     4a7443be520f5737009ea47f93e4aa0328eecbca (diff)
x86: avoid AREG0 in segmentation helpers
Add an explicit CPUX86State parameter instead of relying on AREG0. Rename
remains of op_helper.c to seg_helper.c.

Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
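The conversion follows one mechanical pattern throughout the patch: each
DEF_HELPER_N declaration gains an env argument (so N grows by one), the helper
body takes CPUX86State *env as its first parameter instead of reading the
global register pinned by AREG0, and translate.c passes cpu_env explicitly at
every call site. A minimal sketch of the pattern, using a hypothetical
helper_example that is not part of this patch:

    /* helper.h -- before: implicit global env; after: explicit env argument */
    /* DEF_HELPER_1(example, void, int)                      (old form)      */
    DEF_HELPER_2(example, void, env, int)

    /* seg_helper.c -- the helper now receives env instead of using AREG0 */
    void helper_example(CPUX86State *env, int selector)
    {
        env->regs[R_EAX] = selector;   /* CPU state access goes through env */
    }

    /* translate.c -- the generated call passes cpu_env as the first argument */
    gen_helper_example(cpu_env, tcg_const_i32(selector));

Since no helper in the file touches the AREG0 global any more, seg_helper.c
presumably no longer needs the special compile flags, which is why the
$(obj)/seg_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) rule is dropped from
Makefile.objs below.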
Diffstat (limited to 'target-i386')
-rw-r--r--   target-i386/Makefile.objs |   1
-rw-r--r--   target-i386/helper.h      |  38
-rw-r--r--   target-i386/seg_helper.c  | 217
-rw-r--r--   target-i386/translate.c   |  54
4 files changed, 150 insertions(+), 160 deletions(-)
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index 13a7f6a..71d7d3b 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -8,4 +8,3 @@ obj-$(CONFIG_LINUX_USER) += ioport-user.o
obj-$(CONFIG_BSD_USER) += ioport-user.o
$(obj)/mem_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
-$(obj)/seg_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
diff --git a/target-i386/helper.h b/target-i386/helper.h
index 9a9c064..0f02103 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -28,19 +28,19 @@ DEF_HELPER_1(aas, void, env)
DEF_HELPER_1(daa, void, env)
DEF_HELPER_1(das, void, env)
-DEF_HELPER_1(lsl, tl, tl)
-DEF_HELPER_1(lar, tl, tl)
-DEF_HELPER_1(verr, void, tl)
-DEF_HELPER_1(verw, void, tl)
-DEF_HELPER_1(lldt, void, int)
-DEF_HELPER_1(ltr, void, int)
-DEF_HELPER_2(load_seg, void, int, int)
-DEF_HELPER_3(ljmp_protected, void, int, tl, int)
-DEF_HELPER_4(lcall_real, void, int, tl, int, int)
-DEF_HELPER_4(lcall_protected, void, int, tl, int, int)
-DEF_HELPER_1(iret_real, void, int)
-DEF_HELPER_2(iret_protected, void, int, int)
-DEF_HELPER_2(lret_protected, void, int, int)
+DEF_HELPER_2(lsl, tl, env, tl)
+DEF_HELPER_2(lar, tl, env, tl)
+DEF_HELPER_2(verr, void, env, tl)
+DEF_HELPER_2(verw, void, env, tl)
+DEF_HELPER_2(lldt, void, env, int)
+DEF_HELPER_2(ltr, void, env, int)
+DEF_HELPER_3(load_seg, void, env, int, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, int)
+DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int)
+DEF_HELPER_2(iret_real, void, env, int)
+DEF_HELPER_3(iret_protected, void, env, int, int)
+DEF_HELPER_3(lret_protected, void, env, int, int)
DEF_HELPER_2(read_crN, tl, env, int)
DEF_HELPER_3(write_crN, void, env, int, tl)
DEF_HELPER_2(lmsw, void, env, tl)
@@ -48,15 +48,15 @@ DEF_HELPER_1(clts, void, env)
DEF_HELPER_3(movl_drN_T0, void, env, int, tl)
DEF_HELPER_2(invlpg, void, env, tl)
-DEF_HELPER_3(enter_level, void, int, int, tl)
+DEF_HELPER_4(enter_level, void, env, int, int, tl)
#ifdef TARGET_X86_64
-DEF_HELPER_3(enter64_level, void, int, int, tl)
+DEF_HELPER_4(enter64_level, void, env, int, int, tl)
#endif
-DEF_HELPER_0(sysenter, void)
-DEF_HELPER_1(sysexit, void, int)
+DEF_HELPER_1(sysenter, void, env)
+DEF_HELPER_2(sysexit, void, env, int)
#ifdef TARGET_X86_64
-DEF_HELPER_1(syscall, void, int)
-DEF_HELPER_1(sysret, void, int)
+DEF_HELPER_2(syscall, void, env, int)
+DEF_HELPER_2(sysret, void, env, int)
#endif
DEF_HELPER_2(hlt, void, env, int)
DEF_HELPER_2(monitor, void, env, tl)
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index f5dcf01..f136128 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -19,7 +19,6 @@
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "qemu-log.h"
#include "helper.h"
@@ -35,8 +34,8 @@
#endif
/* return non zero if error */
-static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
- int selector)
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector)
{
SegmentCache *dt;
int index;
@@ -82,14 +81,14 @@ static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
}
/* init the segment cache in vm86 mode. */
-static inline void load_seg_vm(int seg, int selector)
+static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg, selector,
(selector << 4), 0xffff, 0);
}
-static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
+static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
uint32_t *esp_ptr, int dpl)
{
int type, index, shift;
@@ -130,13 +129,13 @@ static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
}
/* XXX: merge with load_seg() */
-static void tss_load_seg(int seg_reg, int selector)
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
uint32_t e1, e2;
int rpl, dpl, cpl;
if ((selector & 0xfffc) != 0) {
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK)) {
@@ -195,7 +194,7 @@ static void tss_load_seg(int seg_reg, int selector)
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(int tss_selector,
+static void switch_tss(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip)
{
@@ -221,7 +220,7 @@ static void switch_tss(int tss_selector,
if (tss_selector & 4) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- if (load_segment(&e1, &e2, tss_selector) != 0) {
+ if (load_segment(env, &e1, &e2, tss_selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
}
if (e2 & DESC_S_MASK) {
@@ -403,7 +402,7 @@ static void switch_tss(int tss_selector,
EDI = new_regs[7];
if (new_eflags & VM_MASK) {
for (i = 0; i < 6; i++) {
- load_seg_vm(i, new_segs[i]);
+ load_seg_vm(env, i, new_segs[i]);
}
/* in vm86, CPL is always 3 */
cpu_x86_set_cpl(env, 3);
@@ -446,12 +445,12 @@ static void switch_tss(int tss_selector,
/* load the segments */
if (!(new_eflags & VM_MASK)) {
- tss_load_seg(R_CS, new_segs[R_CS]);
- tss_load_seg(R_SS, new_segs[R_SS]);
- tss_load_seg(R_ES, new_segs[R_ES]);
- tss_load_seg(R_DS, new_segs[R_DS]);
- tss_load_seg(R_FS, new_segs[R_FS]);
- tss_load_seg(R_GS, new_segs[R_GS]);
+ tss_load_seg(env, R_CS, new_segs[R_CS]);
+ tss_load_seg(env, R_SS, new_segs[R_SS]);
+ tss_load_seg(env, R_ES, new_segs[R_ES]);
+ tss_load_seg(env, R_DS, new_segs[R_DS]);
+ tss_load_seg(env, R_FS, new_segs[R_FS]);
+ tss_load_seg(env, R_GS, new_segs[R_GS]);
}
/* check that EIP is in the CS segment limits */
@@ -545,8 +544,9 @@ static int exception_has_error_code(int intno)
}
/* protected mode interrupt */
-static void do_interrupt_protected(int intno, int is_int, int error_code,
- unsigned int next_eip, int is_hw)
+static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip,
+ int is_hw)
{
SegmentCache *dt;
target_ulong ptr, ssp;
@@ -580,7 +580,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
int type;
uint32_t mask;
@@ -627,7 +627,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
if ((selector & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
@@ -642,14 +642,14 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
}
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(&ss, &esp, dpl);
+ get_ss_esp_from_tss(env, &ss, &esp, dpl);
if ((ss & 0xfffc) == 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
if ((ss & 3) != dpl) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
@@ -773,7 +773,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
sp += 8; \
}
-static inline target_ulong get_rsp_from_tss(int level)
+static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
int index;
@@ -793,8 +793,8 @@ static inline target_ulong get_rsp_from_tss(int level)
}
/* 64 bit interrupt */
-static void do_interrupt64(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
+static void do_interrupt64(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
{
SegmentCache *dt;
target_ulong ptr;
@@ -848,7 +848,7 @@ static void do_interrupt64(int intno, int is_int, int error_code,
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
@@ -867,9 +867,9 @@ static void do_interrupt64(int intno, int is_int, int error_code,
if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
/* to inner privilege */
if (ist != 0) {
- esp = get_rsp_from_tss(ist + 3);
+ esp = get_rsp_from_tss(env, ist + 3);
} else {
- esp = get_rsp_from_tss(dpl);
+ esp = get_rsp_from_tss(env, dpl);
}
esp &= ~0xfLL; /* align stack */
ss = 0;
@@ -881,7 +881,7 @@ static void do_interrupt64(int intno, int is_int, int error_code,
}
new_stack = 0;
if (ist != 0) {
- esp = get_rsp_from_tss(ist + 3);
+ esp = get_rsp_from_tss(env, ist + 3);
} else {
esp = ESP;
}
@@ -926,14 +926,14 @@ static void do_interrupt64(int intno, int is_int, int error_code,
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
-void helper_syscall(int next_eip_addend)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
{
env->exception_index = EXCP_SYSCALL;
env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(env);
}
#else
-void helper_syscall(int next_eip_addend)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
{
int selector;
@@ -990,7 +990,7 @@ void helper_syscall(int next_eip_addend)
#endif
#ifdef TARGET_X86_64
-void helper_sysret(int dflag)
+void helper_sysret(CPUX86State *env, int dflag)
{
int cpl, selector;
@@ -1047,8 +1047,8 @@ void helper_sysret(int dflag)
#endif
/* real mode interrupt */
-static void do_interrupt_real(int intno, int is_int, int error_code,
- unsigned int next_eip)
+static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip)
{
SegmentCache *dt;
target_ulong ptr, ssp;
@@ -1087,8 +1087,8 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
-static void do_interrupt_user(int intno, int is_int, int error_code,
- target_ulong next_eip)
+static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip)
{
SegmentCache *dt;
target_ulong ptr;
@@ -1121,8 +1121,8 @@ static void do_interrupt_user(int intno, int is_int, int error_code,
#else
-static void handle_even_inj(int intno, int is_int, int error_code,
- int is_hw, int rm)
+static void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm)
{
uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
@@ -1153,8 +1153,8 @@ static void handle_even_inj(int intno, int is_int, int error_code,
* the int instruction. next_eip is the EIP value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
-static void do_interrupt_all(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
+static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
{
if (qemu_loglevel_mask(CPU_LOG_INT)) {
if ((env->cr[0] & CR0_PE_MASK)) {
@@ -1193,24 +1193,25 @@ static void do_interrupt_all(int intno, int is_int, int error_code,
if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
- handle_even_inj(intno, is_int, error_code, is_hw, 0);
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
}
#endif
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
- do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
+ do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
} else
#endif
{
- do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+ do_interrupt_protected(env, intno, is_int, error_code, next_eip,
+ is_hw);
}
} else {
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
- handle_even_inj(intno, is_int, error_code, is_hw, 1);
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
}
#endif
- do_interrupt_real(intno, is_int, error_code, next_eip);
+ do_interrupt_real(env, intno, is_int, error_code, next_eip);
}
#if !defined(CONFIG_USER_ONLY)
@@ -1225,17 +1226,13 @@ static void do_interrupt_all(int intno, int is_int, int error_code,
#endif
}
-void do_interrupt(CPUX86State *env1)
+void do_interrupt(CPUX86State *env)
{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
- do_interrupt_user(env->exception_index,
+ do_interrupt_user(env, env->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
@@ -1245,27 +1242,22 @@ void do_interrupt(CPUX86State *env1)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
- do_interrupt_all(env->exception_index,
+ do_interrupt_all(env, env->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
/* successfully delivered */
env->old_exception = -1;
#endif
- env = saved_env;
}
-void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
- do_interrupt_all(intno, 0, 0, 0, is_hw);
- env = saved_env;
+ do_interrupt_all(env, intno, 0, 0, 0, is_hw);
}
-void helper_enter_level(int level, int data32, target_ulong t1)
+void helper_enter_level(CPUX86State *env, int level, int data32,
+ target_ulong t1)
{
target_ulong ssp;
uint32_t esp_mask, esp, ebp;
@@ -1300,7 +1292,8 @@ void helper_enter_level(int level, int data32, target_ulong t1)
}
#ifdef TARGET_X86_64
-void helper_enter64_level(int level, int data64, target_ulong t1)
+void helper_enter64_level(CPUX86State *env, int level, int data64,
+ target_ulong t1)
{
target_ulong esp, ebp;
@@ -1331,7 +1324,7 @@ void helper_enter64_level(int level, int data64, target_ulong t1)
}
#endif
-void helper_lldt(int selector)
+void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
uint32_t e1, e2;
@@ -1385,7 +1378,7 @@ void helper_lldt(int selector)
env->ldt.selector = selector;
}
-void helper_ltr(int selector)
+void helper_ltr(CPUX86State *env, int selector)
{
SegmentCache *dt;
uint32_t e1, e2;
@@ -1449,7 +1442,7 @@ void helper_ltr(int selector)
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
-void helper_load_seg(int seg_reg, int selector)
+void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
uint32_t e1, e2;
int cpl, dpl, rpl;
@@ -1537,7 +1530,7 @@ void helper_load_seg(int seg_reg, int selector)
}
/* protected mode jump */
-void helper_ljmp_protected(int new_cs, target_ulong new_eip,
+void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
int next_eip_addend)
{
int gate_cs, type;
@@ -1547,7 +1540,7 @@ void helper_ljmp_protected(int new_cs, target_ulong new_eip,
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
cpl = env->hflags & HF_CPL_MASK;
@@ -1596,7 +1589,7 @@ void helper_ljmp_protected(int new_cs, target_ulong new_eip,
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
next_eip = env->eip + next_eip_addend;
- switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
+ switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
CC_OP = CC_OP_EFLAGS;
break;
case 4: /* 286 call gate */
@@ -1612,7 +1605,7 @@ void helper_ljmp_protected(int new_cs, target_ulong new_eip,
if (type == 12) {
new_eip |= (e2 & 0xffff0000);
}
- if (load_segment(&e1, &e2, gate_cs) != 0) {
+ if (load_segment(env, &e1, &e2, gate_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
@@ -1644,7 +1637,7 @@ void helper_ljmp_protected(int new_cs, target_ulong new_eip,
}
/* real mode call */
-void helper_lcall_real(int new_cs, target_ulong new_eip1,
+void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
int shift, int next_eip)
{
int new_eip;
@@ -1670,7 +1663,7 @@ void helper_lcall_real(int new_cs, target_ulong new_eip1,
}
/* protected mode call */
-void helper_lcall_protected(int new_cs, target_ulong new_eip,
+void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
int shift, int next_eip_addend)
{
int new_stack, i;
@@ -1685,7 +1678,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
cpl = env->hflags & HF_CPL_MASK;
@@ -1765,7 +1758,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
- switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
+ switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
CC_OP = CC_OP_EFLAGS;
return;
case 4: /* 286 call gate */
@@ -1791,7 +1784,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
@@ -1807,7 +1800,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(&ss, &sp, dpl);
+ get_ss_esp_from_tss(env, &ss, &sp, dpl);
LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
ss, sp, param_count, ESP);
@@ -1817,7 +1810,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
if ((ss & 3) != dpl) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
@@ -1897,7 +1890,7 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
}
/* real and vm86 mode iret */
-void helper_iret_real(int shift)
+void helper_iret_real(CPUX86State *env, int shift)
{
uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
target_ulong ssp;
@@ -1936,7 +1929,7 @@ void helper_iret_real(int shift)
env->hflags2 &= ~HF2_NMI_MASK;
}
-static inline void validate_seg(int seg_reg, int cpl)
+static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
int dpl;
uint32_t e2;
@@ -1960,7 +1953,8 @@ static inline void validate_seg(int seg_reg, int cpl)
}
/* protected mode iret */
-static inline void helper_ret_protected(int shift, int is_iret, int addend)
+static inline void helper_ret_protected(CPUX86State *env, int shift,
+ int is_iret, int addend)
{
uint32_t new_cs, new_eflags, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
@@ -2016,7 +2010,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
if (!(e2 & DESC_S_MASK) ||
@@ -2093,7 +2087,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
if ((new_ss & 3) != rpl) {
raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
}
if (!(ss_e2 & DESC_S_MASK) ||
@@ -2130,10 +2124,10 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
}
/* validate data segments */
- validate_seg(R_ES, rpl);
- validate_seg(R_DS, rpl);
- validate_seg(R_FS, rpl);
- validate_seg(R_GS, rpl);
+ validate_seg(env, R_ES, rpl);
+ validate_seg(env, R_DS, rpl);
+ validate_seg(env, R_FS, rpl);
+ validate_seg(env, R_GS, rpl);
sp += addend;
}
@@ -2168,19 +2162,19 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
VIP_MASK);
- load_seg_vm(R_CS, new_cs & 0xffff);
+ load_seg_vm(env, R_CS, new_cs & 0xffff);
cpu_x86_set_cpl(env, 3);
- load_seg_vm(R_SS, new_ss & 0xffff);
- load_seg_vm(R_ES, new_es & 0xffff);
- load_seg_vm(R_DS, new_ds & 0xffff);
- load_seg_vm(R_FS, new_fs & 0xffff);
- load_seg_vm(R_GS, new_gs & 0xffff);
+ load_seg_vm(env, R_SS, new_ss & 0xffff);
+ load_seg_vm(env, R_ES, new_es & 0xffff);
+ load_seg_vm(env, R_DS, new_ds & 0xffff);
+ load_seg_vm(env, R_FS, new_fs & 0xffff);
+ load_seg_vm(env, R_GS, new_gs & 0xffff);
env->eip = new_eip & 0xffff;
ESP = new_esp;
}
-void helper_iret_protected(int shift, int next_eip)
+void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
int tss_selector, type;
uint32_t e1, e2;
@@ -2196,7 +2190,7 @@ void helper_iret_protected(int shift, int next_eip)
if (tss_selector & 4) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- if (load_segment(&e1, &e2, tss_selector) != 0) {
+ if (load_segment(env, &e1, &e2, tss_selector) != 0) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
@@ -2204,19 +2198,19 @@ void helper_iret_protected(int shift, int next_eip)
if (type != 3) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
+ switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
} else {
- helper_ret_protected(shift, 1, 0);
+ helper_ret_protected(env, shift, 1, 0);
}
env->hflags2 &= ~HF2_NMI_MASK;
}
-void helper_lret_protected(int shift, int addend)
+void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
- helper_ret_protected(shift, 0, addend);
+ helper_ret_protected(env, shift, 0, addend);
}
-void helper_sysenter(void)
+void helper_sysenter(CPUX86State *env)
{
if (env->sysenter_cs == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
@@ -2250,7 +2244,7 @@ void helper_sysenter(void)
EIP = env->sysenter_eip;
}
-void helper_sysexit(int dflag)
+void helper_sysexit(CPUX86State *env, int dflag)
{
int cpl;
@@ -2290,7 +2284,7 @@ void helper_sysexit(int dflag)
EIP = EDX;
}
-target_ulong helper_lsl(target_ulong selector1)
+target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
unsigned int limit;
uint32_t e1, e2, eflags, selector;
@@ -2301,7 +2295,7 @@ target_ulong helper_lsl(target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
rpl = selector & 3;
@@ -2338,7 +2332,7 @@ target_ulong helper_lsl(target_ulong selector1)
return limit;
}
-target_ulong helper_lar(target_ulong selector1)
+target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl, type;
@@ -2348,7 +2342,7 @@ target_ulong helper_lar(target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
rpl = selector & 3;
@@ -2387,7 +2381,7 @@ target_ulong helper_lar(target_ulong selector1)
return e2 & 0x00f0ff00;
}
-void helper_verr(target_ulong selector1)
+void helper_verr(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl;
@@ -2397,7 +2391,7 @@ void helper_verr(target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
@@ -2425,7 +2419,7 @@ void helper_verr(target_ulong selector1)
CC_SRC = eflags | CC_Z;
}
-void helper_verw(target_ulong selector1)
+void helper_verw(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl;
@@ -2435,7 +2429,7 @@ void helper_verw(target_ulong selector1)
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
@@ -2460,19 +2454,14 @@ void helper_verw(target_ulong selector1)
}
#if defined(CONFIG_USER_ONLY)
-void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
+void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = s;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
(selector << 4), 0xffff, 0);
} else {
- helper_load_seg(seg_reg, selector);
+ helper_load_seg(env, seg_reg, selector);
}
- env = saved_env;
}
#endif
diff --git a/target-i386/translate.c b/target-i386/translate.c
index a4c2ae0..26091f9 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -2443,7 +2443,7 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
+ gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
@@ -2680,7 +2680,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
gen_op_st_T0_A0(ot + s->mem_index);
if (level) {
/* XXX: must save state */
- gen_helper_enter64_level(tcg_const_i32(level),
+ gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
tcg_const_i32((ot == OT_QUAD)),
cpu_T[1]);
}
@@ -2705,7 +2705,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
gen_op_st_T0_A0(ot + s->mem_index);
if (level) {
/* XXX: must save state */
- gen_helper_enter_level(tcg_const_i32(level),
+ gen_helper_enter_level(cpu_env, tcg_const_i32(level),
tcg_const_i32(s->dflag),
cpu_T[1]);
}
@@ -4759,13 +4759,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - pc_start));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
@@ -4786,7 +4786,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
+ gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(s->pc - pc_start));
} else {
gen_op_movl_seg_T0_vm(R_CS);
@@ -6320,7 +6320,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_lret_protected(tcg_const_i32(s->dflag),
+ gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
@@ -6347,20 +6347,20 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
if (!s->pe) {
/* real mode */
- gen_helper_iret_real(tcg_const_i32(s->dflag));
+ gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
s->cc_op = CC_OP_EFLAGS;
} else if (s->vm86) {
if (s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_helper_iret_real(tcg_const_i32(s->dflag));
+ gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
s->cc_op = CC_OP_EFLAGS;
}
} else {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_iret_protected(tcg_const_i32(s->dflag),
+ gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
tcg_const_i32(s->pc - s->cs_base));
s->cc_op = CC_OP_EFLAGS;
}
@@ -7028,7 +7028,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_sysenter();
+ gen_helper_sysenter(cpu_env);
gen_eob(s);
}
break;
@@ -7041,7 +7041,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_sysexit(tcg_const_i32(dflag));
+ gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
gen_eob(s);
}
break;
@@ -7050,7 +7050,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* XXX: is it usable in real mode ? */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
+ gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 0x107: /* sysret */
@@ -7059,7 +7059,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_sysret(tcg_const_i32(s->dflag));
+ gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
/* condition codes are modified only in long mode */
if (s->lma)
s->cc_op = CC_OP_EFLAGS;
@@ -7109,7 +7109,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lldt(cpu_tmp2_i32);
+ gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
case 1: /* str */
@@ -7132,7 +7132,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_ltr(cpu_tmp2_i32);
+ gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
case 4: /* verr */
@@ -7142,10 +7142,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- if (op == 4)
- gen_helper_verr(cpu_T[0]);
- else
- gen_helper_verw(cpu_T[0]);
+ if (op == 4) {
+ gen_helper_verr(cpu_env, cpu_T[0]);
+ } else {
+ gen_helper_verw(cpu_env, cpu_T[0]);
+ }
s->cc_op = CC_OP_EFLAGS;
break;
default:
@@ -7506,10 +7507,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
t0 = tcg_temp_local_new();
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- if (b == 0x102)
- gen_helper_lar(t0, cpu_T[0]);
- else
- gen_helper_lsl(t0, cpu_T[0]);
+ if (b == 0x102) {
+ gen_helper_lar(t0, cpu_env, cpu_T[0]);
+ } else {
+ gen_helper_lsl(t0, cpu_env, cpu_T[0]);
+ }
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);