Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/fpu_helper.c  175
-rw-r--r--  target-i386/helper.h        4
-rw-r--r--  target-i386/translate.c     4
3 files changed, 101 insertions, 82 deletions
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
index 2d54b47..a7da370 100644
--- a/target-i386/fpu_helper.c
+++ b/target-i386/fpu_helper.c
@@ -1115,89 +1115,89 @@ void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
 }
 #endif
 
-static void do_fxsave(CPUX86State *env, target_ulong ptr, int data64,
-                      uintptr_t retaddr)
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
 {
-    int fpus, fptag, i, nb_xmm_regs;
-    floatx80 tmp;
+    int fpus, fptag, i;
     target_ulong addr;
 
-    /* The operand must be 16 byte aligned */
-    if (ptr & 0xf) {
-        raise_exception_ra(env, EXCP0D_GPF, retaddr);
-    }
-
     fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
     fptag = 0;
     for (i = 0; i < 8; i++) {
         fptag |= (env->fptags[i] << i);
     }
-    cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
-    cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
-    cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, retaddr);
-#ifdef TARGET_X86_64
-    if (data64) {
-        cpu_stq_data_ra(env, ptr + 0x08, 0, retaddr); /* rip */
-        cpu_stq_data_ra(env, ptr + 0x10, 0, retaddr); /* rdp */
-    } else
-#endif
-    {
-        cpu_stl_data_ra(env, ptr + 0x08, 0, retaddr); /* eip */
-        cpu_stl_data_ra(env, ptr + 0x0c, 0, retaddr); /* sel */
-        cpu_stl_data_ra(env, ptr + 0x10, 0, retaddr); /* dp */
-        cpu_stl_data_ra(env, ptr + 0x14, 0, retaddr); /* sel */
-    }
+    cpu_stw_data_ra(env, ptr, env->fpuc, ra);
+    cpu_stw_data_ra(env, ptr + 2, fpus, ra);
+    cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, ra);
+
+    /* In 32-bit mode this is eip, sel, dp, sel.
+       In 64-bit mode this is rip, rdp.
+       But in either case we don't write actual data, just zeros.  */
+    cpu_stq_data_ra(env, ptr + 0x08, 0, ra); /* eip+sel; rip */
+    cpu_stq_data_ra(env, ptr + 0x10, 0, ra); /* edp+sel; rdp */
 
     addr = ptr + 0x20;
     for (i = 0; i < 8; i++) {
-        tmp = ST(i);
-        helper_fstt(env, tmp, addr, retaddr);
+        floatx80 tmp = ST(i);
+        helper_fstt(env, tmp, addr, ra);
         addr += 16;
     }
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, ra); /* mxcsr */
+    cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, ra); /* mxcsr_mask */
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int i, nb_xmm_regs;
+    target_ulong addr;
+
+    if (env->hflags & HF_CS64_MASK) {
+        nb_xmm_regs = 16;
+    } else {
+        nb_xmm_regs = 8;
+    }
+
+    addr = ptr + 0xa0;
+    for (i = 0; i < nb_xmm_regs; i++) {
+        cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+        cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
+        addr += 16;
+    }
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    do_xsave_fpu(env, ptr, ra);
 
     if (env->cr[4] & CR4_OSFXSR_MASK) {
-        /* XXX: finish it */
-        cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, retaddr); /* mxcsr */
-        cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, retaddr); /* mxcsr_mask */
-        if (env->hflags & HF_CS64_MASK) {
-            nb_xmm_regs = 16;
-        } else {
-            nb_xmm_regs = 8;
-        }
-        addr = ptr + 0xa0;
+        do_xsave_mxcsr(env, ptr, ra);
         /* Fast FXSAVE leaves out the XMM registers */
         if (!(env->efer & MSR_EFER_FFXSR)
             || (env->hflags & HF_CPL_MASK)
             || !(env->hflags & HF_LMA_MASK)) {
-            for (i = 0; i < nb_xmm_regs; i++) {
-                cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), retaddr);
-                cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), retaddr);
-                addr += 16;
-            }
+            do_xsave_sse(env, ptr, ra);
         }
     }
 }
 
-void helper_fxsave(CPUX86State *env, target_ulong ptr, int data64)
-{
-    do_fxsave(env, ptr, data64, GETPC());
-}
-
-static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64,
-                       uintptr_t retaddr)
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
 {
-    int i, fpus, fptag, nb_xmm_regs;
-    floatx80 tmp;
+    int i, fpus, fptag;
     target_ulong addr;
 
-    /* The operand must be 16 byte aligned */
-    if (ptr & 0xf) {
-        raise_exception_ra(env, EXCP0D_GPF, retaddr);
-    }
-
-    cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
-    fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
-    fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+    cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, ra));
+    fpus = cpu_lduw_data_ra(env, ptr + 2, ra);
+    fptag = cpu_lduw_data_ra(env, ptr + 4, ra);
     env->fpstt = (fpus >> 11) & 7;
     env->fpus = fpus & ~0x3800;
     fptag ^= 0xff;
@@ -1207,39 +1207,58 @@ static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64,
 
     addr = ptr + 0x20;
     for (i = 0; i < 8; i++) {
-        tmp = helper_fldt(env, addr, retaddr);
+        floatx80 tmp = helper_fldt(env, addr, ra);
         ST(i) = tmp;
         addr += 16;
     }
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+    int i, nb_xmm_regs;
+    target_ulong addr;
+
+    if (env->hflags & HF_CS64_MASK) {
+        nb_xmm_regs = 16;
+    } else {
+        nb_xmm_regs = 8;
+    }
+
+    addr = ptr + 0xa0;
+    for (i = 0; i < nb_xmm_regs; i++) {
+        env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+        env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+        addr += 16;
+    }
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+    uintptr_t ra = GETPC();
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    do_xrstor_fpu(env, ptr, ra);
 
     if (env->cr[4] & CR4_OSFXSR_MASK) {
-        /* XXX: finish it */
-        cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, retaddr));
-        /* cpu_ldl_data_ra(env, ptr + 0x1c, retaddr); */
-        if (env->hflags & HF_CS64_MASK) {
-            nb_xmm_regs = 16;
-        } else {
-            nb_xmm_regs = 8;
-        }
-        addr = ptr + 0xa0;
-        /* Fast FXRESTORE leaves out the XMM registers */
+        do_xrstor_mxcsr(env, ptr, ra);
+        /* Fast FXRSTOR leaves out the XMM registers */
         if (!(env->efer & MSR_EFER_FFXSR)
             || (env->hflags & HF_CPL_MASK)
             || !(env->hflags & HF_LMA_MASK)) {
-            for (i = 0; i < nb_xmm_regs; i++) {
-                env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, retaddr);
-                env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, retaddr);
-                addr += 16;
-            }
+            do_xrstor_sse(env, ptr, ra);
         }
     }
 }
 
-void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64)
-{
-    do_fxrstor(env, ptr, data64, GETPC());
-}
-
 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
 {
     CPU_LDoubleU temp;
diff --git a/target-i386/helper.h b/target-i386/helper.h
index 3a25c3b..6109e46 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -185,8 +185,8 @@ DEF_HELPER_3(fstenv, void, env, tl, int)
 DEF_HELPER_3(fldenv, void, env, tl, int)
 DEF_HELPER_3(fsave, void, env, tl, int)
 DEF_HELPER_3(frstor, void, env, tl, int)
-DEF_HELPER_3(fxsave, void, env, tl, int)
-DEF_HELPER_3(fxrstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
 
 DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
 DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index c8e2799..fec2601 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7515,7 +7515,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                 break;
             }
             gen_lea_modrm(env, s, modrm);
-            gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+            gen_helper_fxsave(cpu_env, cpu_A0);
            break;
         case 1: /* fxrstor */
             if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
@@ -7526,7 +7526,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                 break;
             }
             gen_lea_modrm(env, s, modrm);
-            gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+            gen_helper_fxrstor(cpu_env, cpu_A0);
             break;
         case 2: /* ldmxcsr */
         case 3: /* stmxcsr */
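
For reference, the offsets hard-coded in do_xsave_fpu, do_xsave_mxcsr and do_xsave_sse above all point into the 512-byte legacy FXSAVE area. The struct below is a minimal illustrative sketch, not part of the patch: the type and field names are invented here, and only the offsets the helpers actually touch (0x00, 0x02, 0x04, 0x08, 0x10, 0x18, 0x1c, 0x20, 0xa0) are taken from the code above.

/* Illustrative only -- not part of the patch. Field names are invented;
 * offsets mirror the constants used by the helpers above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fxsave_area {
    uint16_t fcw;              /* 0x00: env->fpuc */
    uint16_t fsw;              /* 0x02: fpus, with TOP folded into bits 11-13 */
    uint16_t ftw;              /* 0x04: abridged tag word, fptag ^ 0xff */
    uint16_t fop;              /* 0x06: not written by these helpers */
    uint64_t fpu_ip;           /* 0x08: eip+sel or rip, stored as zero */
    uint64_t fpu_dp;           /* 0x10: edp+sel or rdp, stored as zero */
    uint32_t mxcsr;            /* 0x18 */
    uint32_t mxcsr_mask;       /* 0x1c: stored as 0x0000ffff */
    uint8_t  st[8][16];        /* 0x20: ST0..ST7, 80-bit values in 16-byte slots */
    uint8_t  xmm[16][16];      /* 0xa0: XMM0..7, or XMM0..15 under HF_CS64_MASK */
    uint8_t  reserved[96];     /* 0x1a0..0x1ff */
};

/* Keep the sketch honest about the offsets used above. */
static_assert(offsetof(struct fxsave_area, mxcsr) == 0x18, "mxcsr at 0x18");
static_assert(offsetof(struct fxsave_area, st) == 0x20, "ST regs at 0x20");
static_assert(offsetof(struct fxsave_area, xmm) == 0xa0, "XMM regs at 0xa0");
static_assert(sizeof(struct fxsave_area) == 512, "FXSAVE area is 512 bytes");

As in the guards of helper_fxsave/helper_fxrstor above, the xmm part of the area is only written or read when CR4.OSFXSR is set and the fast-FXSAVE case (EFER.FFXSR at CPL 0 in long mode) does not apply.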