Diffstat (limited to 'target/i386/tcg/seg_helper.c')
-rw-r--r--  target/i386/tcg/seg_helper.c | 125
1 file changed, 79 insertions(+), 46 deletions(-)
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index aac092a..071f3fb 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -22,12 +22,13 @@
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"
+#include "tcg-cpu.h"
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask) \
@@ -94,7 +95,7 @@ static uint32_t popl(StackAccess *sa)
int get_pg_mode(CPUX86State *env)
{
- int pg_mode = 0;
+ int pg_mode = PG_MODE_PG;
if (!(env->cr[0] & CR0_PG_MASK)) {
return 0;
}
@@ -128,6 +129,22 @@ int get_pg_mode(CPUX86State *env)
return pg_mode;
}
+static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
+{
+ int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
+ int mmu_index_base =
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ (pl < 3 && (env->eflags & AC_MASK)
+ ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
+
+ return mmu_index_base + mmu_index_32;
+}
+
+int cpu_mmu_index_kernel(CPUX86State *env)
+{
+ return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
+}
+
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
uint32_t *e2_ptr, int selector,
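
The new helper centralizes the choice of MMU index for implicit supervisor accesses (TSS reads/writes, stack pushes during privilege transitions). SMAP only blocks supervisor access to user pages while EFLAGS.AC is clear, and each logical index comes in a 64-bit and a 32-bit flavor one slot apart, selected by HF_LMA_MASK. A toy re-creation of the selection, with stand-in constants rather than the MMU_*_IDX values from QEMU's target/i386/cpu.h:

    /* Toy re-creation of x86_mmu_index_kernel_pl(); constants are stand-ins. */
    #include <assert.h>
    #include <stdbool.h>

    enum { KSMAP64_IDX = 0, KNOSMAP64_IDX = 2 }; /* +1 picks the 32-bit twin */

    static int kernel_mmu_index(bool lma, bool smap, bool ac, unsigned pl)
    {
        int index32 = lma ? 0 : 1;            /* 32-bit variant follows 64-bit */
        int base = !smap          ? KNOSMAP64_IDX :  /* SMAP disabled */
                   (pl < 3 && ac) ? KNOSMAP64_IDX :  /* AC=1 lifts SMAP */
                                    KSMAP64_IDX;     /* SMAP enforced */
        return base + index32;
    }

    int main(void)
    {
        assert(kernel_mmu_index(true, true, false, 0) == KSMAP64_IDX);
        assert(kernel_mmu_index(false, false, false, 0) == KNOSMAP64_IDX + 1);
        return 0;
    }
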
@@ -309,10 +326,10 @@ static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
-/* return 0 if switching to a 16-bit selector */
-static int switch_tss_ra(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip, uintptr_t retaddr)
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ uint32_t error_code, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
target_ulong tss_base;
@@ -378,7 +395,7 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
/* X86Access avoids memory exceptions during the task switch */
mmu_index = cpu_mmu_index_kernel(env);
- access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
+ access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
MMU_DATA_STORE, mmu_index, retaddr);
if (source == SWITCH_TSS_CALL) {
@@ -386,7 +403,8 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
probe_access(env, tss_base, 2, MMU_DATA_STORE,
mmu_index, retaddr);
}
- access_prepare_mmu(&new, env, tss_base, tss_limit,
+ /* While the true tss_limit may be larger, we don't access the iopb here. */
+ access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
MMU_DATA_LOAD, mmu_index, retaddr);
/* save the current state in the old TSS */
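
The two `+ 1` changes fix an off-by-one in the X86Access windows: an x86 limit is the offset of the last valid byte, so the number of bytes to prepare is limit + 1; passing the bare limit left the final byte of each TSS outside the mapped range. A minimal statement of the convention:

    /* An x86 segment/TSS limit is inclusive: valid offsets are 0..limit. */
    #include <assert.h>

    int main(void)
    {
        unsigned tss_limit_max = 0x67;        /* 32-bit TSS: offsets 0..0x67 */
        unsigned size = tss_limit_max + 1;    /* 0x68 bytes to map */
        assert(size == 0x68);
        return 0;
    }
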
@@ -455,10 +473,6 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
new_segs[R_GS] = 0;
new_trap = 0;
}
- /* XXX: avoid a compiler warning, see
- http://support.amd.com/us/Processor_TechDocs/24593.pdf
- chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
- (void)new_trap;
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
@@ -581,14 +595,43 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
- return type >> 3;
+
+ if (has_error_code) {
+ int cpl = env->hflags & HF_CPL_MASK;
+ StackAccess sa;
+
+ /* push the error code */
+ sa.env = env;
+ sa.ra = retaddr;
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
+ sa.sp = env->regs[R_ESP];
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ sa.sp_mask = 0xffffffff;
+ } else {
+ sa.sp_mask = 0xffff;
+ }
+ sa.ss_base = env->segs[R_SS].base;
+ if (type & 8) {
+ pushl(&sa, error_code);
+ } else {
+ pushw(&sa, error_code);
+ }
+ SET_ESP(sa.sp, sa.sp_mask);
+ }
+
+ if (new_trap) {
+ env->dr[6] |= DR6_BT;
+ raise_exception_ra(env, EXCP01_DB, retaddr);
+ }
}
-static int switch_tss(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ int error_code)
{
- return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
+ has_error_code, error_code, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
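
switch_tss_ra now returns void: the old `type >> 3` return value existed so that do_interrupt_protected could pick a push width, and that decision moved here, where the push runs after the new task's SS:ESP and CPL are already loaded. Bit 3 of the TSS descriptor type distinguishes a 386 TSS (types 9/11, 32-bit, pushl) from a 286 TSS (types 1/3, 16-bit, pushw), which is exactly what `type & 8` tests. A stand-alone illustration (tss_is_32bit is a hypothetical name, not a QEMU helper):

    /* TSS descriptor types: 1/3 = 286 (16-bit), 9/11 = 386 (32-bit). */
    #include <assert.h>
    #include <stdbool.h>

    static bool tss_is_32bit(int type)
    {
        return (type & 8) != 0;   /* the "type & 8" test from the patch */
    }

    int main(void)
    {
        assert(!tss_is_32bit(1) && !tss_is_32bit(3));  /* pushw */
        assert(tss_is_32bit(9) && tss_is_32bit(11));   /* pushl */
        return 0;
    }
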
@@ -694,7 +737,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
sa.env = env;
sa.ra = 0;
- sa.mmu_index = cpu_mmu_index_kernel(env);
if (type == 5) {
/* task gate */
@@ -702,23 +744,8 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
- if (has_error_code) {
- /* push the error code */
- if (env->segs[R_SS].flags & DESC_B_MASK) {
- sa.sp_mask = 0xffffffff;
- } else {
- sa.sp_mask = 0xffff;
- }
- sa.sp = env->regs[R_ESP];
- sa.ss_base = env->segs[R_SS].base;
- if (shift) {
- pushl(&sa, error_code);
- } else {
- pushw(&sa, error_code);
- }
- SET_ESP(sa.sp, sa.sp_mask);
- }
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
+ has_error_code, error_code);
return;
}
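
The push removed from the task-gate path is not lost: switch_tss delegates to switch_tss_ra, which performs the same push against the incoming task's stack, so sp_mask and ss_base reflect the new SS rather than the outgoing one. The mask itself follows SS.B (descriptor bit 54, bit 22 of e2): a "big" 32-bit stack uses all of ESP, a 16-bit stack wraps SP at 64 KiB. A stand-alone version of that selection:

    /* SS.B picks the stack-pointer width: B=1 -> 32-bit ESP, B=0 -> 16-bit SP. */
    #include <assert.h>
    #include <stdint.h>

    #define DESC_B_MASK (1u << 22)   /* descriptor D/B bit, as in QEMU's cpu.h */

    static uint32_t stack_sp_mask(uint32_t ss_flags)
    {
        return (ss_flags & DESC_B_MASK) ? 0xffffffffu : 0xffffu;
    }

    int main(void)
    {
        assert(stack_sp_mask(DESC_B_MASK) == 0xffffffffu);
        assert(stack_sp_mask(0) == 0xffffu);
        return 0;
    }
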
@@ -749,6 +776,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (e2 & DESC_C_MASK) {
dpl = cpl;
}
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
if (dpl < cpl) {
/* to inner privilege */
uint32_t esp;
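
Here the stack MMU index is derived from the privilege level the handler will actually run at: the conforming check just above forces `dpl = cpl` (a conforming code segment executes at the caller's CPL), so x86_mmu_index_pl(env, dpl) sees the effective target ring instead of the blanket kernel index removed earlier in the function. A sketch of how the effective level falls out of the descriptor's high word, assuming the standard layout (DPL in descriptor bits 45-46, i.e. bits 13-14 of e2; the C flag at descriptor bit 42, i.e. bit 10):

    /* Effective privilege level of a far transfer's target code segment. */
    #include <assert.h>
    #include <stdint.h>

    #define DESC_DPL_SHIFT 13
    #define DESC_C_MASK    (1u << 10)   /* conforming code segment */

    static unsigned effective_pl(uint32_t e2, unsigned cpl)
    {
        unsigned dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        return (e2 & DESC_C_MASK) ? cpl : dpl;  /* conforming: stay at CPL */
    }

    int main(void)
    {
        assert(effective_pl(0, 3) == 0);            /* nonconforming ring-0 */
        assert(effective_pl(DESC_C_MASK, 3) == 3);  /* conforming */
        return 0;
    }
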
@@ -926,7 +954,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
target_ulong ptr;
int type, dpl, selector, cpl, ist;
int has_error_code, new_stack;
- uint32_t e1, e2, e3, ss, eflags;
+ uint32_t e1, e2, e3, eflags;
target_ulong old_eip, offset;
bool set_rf;
StackAccess sa;
@@ -1000,14 +1028,13 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
sa.env = env;
sa.ra = 0;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
sa.sp_mask = -1;
sa.ss_base = 0;
if (dpl < cpl || ist != 0) {
/* to inner privilege */
new_stack = 1;
sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
- ss = 0;
} else {
/* to same privilege */
if (env->eflags & VM_MASK) {
@@ -1040,7 +1067,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
if (new_stack) {
- ss = 0 | dpl;
+ uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
}
env->regs[R_ESP] = sa.sp;
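
In 64-bit mode an inter-privilege interrupt never loads a real stack segment: SS becomes a NULL selector whose RPL equals the new CPL, which is why `ss` could shrink to this block's scope and why the deleted `ss = 0` earlier was redundant. The selector arithmetic, for reference (index and TI stay zero, only RPL(1:0) is set):

    /* Selector layout: index(15:3) | TI(2) | RPL(1:0). */
    #include <assert.h>
    #include <stdint.h>

    static uint16_t null_selector_with_rpl(unsigned rpl)
    {
        return (uint16_t)(0 | (rpl & 3));   /* the patch's "0 | dpl" */
    }

    int main(void)
    {
        assert(null_selector_with_rpl(0) == 0);
        assert(null_selector_with_rpl(3) == 3);
        return 0;
    }
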
@@ -1135,7 +1162,7 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
sa.sp = env->regs[R_ESP];
sa.sp_mask = 0xffff;
sa.ss_base = env->segs[R_SS].base;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, 0);
if (is_int) {
old_eip = next_eip;
@@ -1514,7 +1541,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
+ false, 0, GETPC());
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1599,7 +1627,7 @@ void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
sa.sp = env->regs[R_ESP];
sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
sa.ss_base = env->segs[R_SS].base;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, 0);
if (shift) {
pushl(&sa, env->segs[R_CS].selector);
@@ -1639,9 +1667,9 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
sa.env = env;
sa.ra = GETPC();
- sa.mmu_index = cpu_mmu_index_kernel(env);
if (e2 & DESC_S_MASK) {
+ /* "normal" far call, no stack switch possible */
if (!(e2 & DESC_CS_MASK)) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
@@ -1665,6 +1693,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
if (shift == 2) {
@@ -1725,7 +1754,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
+ false, 0, GETPC());
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1792,6 +1822,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
#ifdef TARGET_X86_64
if (shift == 2) {
ss = dpl; /* SS = NULL selector with RPL = new CPL */
@@ -1870,6 +1901,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
new_stack = 1;
} else {
/* to same privilege */
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
sa.sp = env->regs[R_ESP];
sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
sa.ss_base = env->segs[R_SS].base;
@@ -2234,7 +2266,8 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
if (type != 3) {
raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
- switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
+ false, 0, GETPC());
} else {
helper_ret_protected(env, shift, 1, 0, GETPC());
}
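
One behavioral addition rides along with the refactor: the previously dead `new_trap` (the T bit read from the incoming TSS, whose unused-variable workaround is deleted in the hunk at @@ -455,10) is now acted on at the end of switch_tss_ra, raising #DB with DR6.BT set, the architectural task-switch debug trap that the old XXX comment deferred. DR6.BT is bit 15; a minimal illustration:

    /* DR6.BT (bit 15) marks a #DB caused by a task switch with TSS.T set. */
    #include <assert.h>
    #include <stdint.h>

    #define DR6_BT (1u << 15)

    int main(void)
    {
        uint32_t dr6 = 0;
        int new_trap = 1;        /* T bit from the new TSS */

        if (new_trap) {
            dr6 |= DR6_BT;       /* then deliver #DB (EXCP01_DB in QEMU) */
        }
        assert(dr6 & DR6_BT);
        return 0;
    }
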