author     Peter Maydell <peter.maydell@linaro.org>  2017-06-30 14:59:01 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2017-06-30 14:59:01 +0100
commit     6db174aed1f70215b681aaf3a6a9e23e2c7ba86d (patch)
tree       93a88c25b31eedbecc0d128393da70773751a5b8 /target
parent     0912d0f2c719e029c5101eacaacbaf2c755be099 (diff)
parent     a1e58ddcb3eed7ec4a158512b9dae46f90492c1b (diff)
Merge remote-tracking branch 'remotes/vivier/tags/m68k-for-2.10-pull-request' into staging
# gpg: Signature made Fri 30 Jun 2017 13:30:44 BST
# gpg:                using RSA key 0xF30C38BD3F2FBE3C
# gpg: Good signature from "Laurent Vivier <lvivier@redhat.com>"
# gpg:                 aka "Laurent Vivier <laurent@vivier.eu>"
# gpg:                 aka "Laurent Vivier (Red Hat) <lvivier@redhat.com>"
# Primary key fingerprint: CD2F 75DD C8E3 A4DC 2E4F 5173 F30C 38BD 3F2F BE3C

* remotes/vivier/tags/m68k-for-2.10-pull-request:
  target/m68k: add fmovem
  target/m68k: add explicit single and double precision operations (part 2)
  target/m68k: add fsglmul and fsgldiv
  softfloat: define floatx80_round()
  target/m68k: add explicit single and double precision operations
  target/m68k: add fmovecr
  target/m68k: add fscc.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/m68k/fpu_helper.c  310
-rw-r--r--  target/m68k/helper.h       27
-rw-r--r--  target/m68k/translate.c   388
3 files changed, 602 insertions, 123 deletions
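Most of the new helpers in fpu_helper.c below (fsadd/fdadd, fssub/fdsub, fsmul/fdmul, fsdiv/fddiv, fssqrt/fdsqrt, fsabs/fdabs, fsneg/fdneg, fsround/fdround) share one bracket pattern: PREC_BEGIN() saves the current floatx80 rounding precision and narrows it to 32 or 64 bits, the extended-precision softfloat operation runs, and PREC_END() restores the saved precision. A minimal stand-alone sketch of the same idea, using native C types and made-up names instead of QEMU's softfloat (nothing here is QEMU API):

#include <stdio.h>

/* Toy model of the PREC_BEGIN()/PREC_END() bracket: compute in wide
 * precision, round the result to the selected narrower format, then
 * restore the previous setting.  Made-up names throughout. */
enum prec { PREC_X = 80, PREC_S = 32, PREC_D = 64 };

static enum prec current_prec = PREC_X;

static enum prec get_prec(void)       { return current_prec; }
static void      set_prec(enum prec p) { current_prec = p; }

/* Counterpart of floatx80_round(): narrow to the current precision. */
static long double round_to_prec(long double v)
{
    switch (get_prec()) {
    case PREC_S: return (float)v;
    case PREC_D: return (double)v;
    default:     return v;
    }
}

/* fsadd-style helper: add, deliver a single-precision-rounded result,
 * then put the old rounding precision back (PREC_BEGIN(32)/PREC_END()). */
static long double toy_fsadd(long double a, long double b)
{
    enum prec old = get_prec();
    set_prec(PREC_S);
    long double r = round_to_prec(a + b);
    set_prec(old);
    return r;
}

int main(void)
{
    long double a = 1.0L, b = 1e-10L;
    printf("fadd : %.20Lg\n", a + b);           /* keeps the tiny addend */
    printf("fsadd: %.20Lg\n", toy_fsadd(a, b)); /* rounds it away: 1     */
    return 0;
}
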
diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
index a9e17f5..bdfc537 100644
--- a/target/m68k/fpu_helper.c
+++ b/target/m68k/fpu_helper.c
@@ -22,6 +22,36 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+/* Undefined offsets may be different on various FPU.
+ * On 68040 they return 0.0 (floatx80_zero)
+ */
+
+static const floatx80 fpu_rom[128] = {
+ [0x00] = floatx80_pi, /* Pi */
+ [0x0b] = make_floatx80(0x3ffd, 0x9a209a84fbcff798ULL), /* Log10(2) */
+ [0x0c] = make_floatx80(0x4000, 0xadf85458a2bb4a9aULL), /* e */
+ [0x0d] = make_floatx80(0x3fff, 0xb8aa3b295c17f0bcULL), /* Log2(e) */
+ [0x0e] = make_floatx80(0x3ffd, 0xde5bd8a937287195ULL), /* Log10(e) */
+ [0x0f] = floatx80_zero, /* Zero */
+ [0x30] = floatx80_ln2, /* ln(2) */
+ [0x31] = make_floatx80(0x4000, 0x935d8dddaaa8ac17ULL), /* ln(10) */
+ [0x32] = floatx80_one, /* 10^0 */
+ [0x33] = make_floatx80(0x4002, 0xa000000000000000ULL), /* 10^1 */
+ [0x34] = make_floatx80(0x4005, 0xc800000000000000ULL), /* 10^2 */
+ [0x35] = make_floatx80(0x400c, 0x9c40000000000000ULL), /* 10^4 */
+ [0x36] = make_floatx80(0x4019, 0xbebc200000000000ULL), /* 10^8 */
+ [0x37] = make_floatx80(0x4034, 0x8e1bc9bf04000000ULL), /* 10^16 */
+ [0x38] = make_floatx80(0x4069, 0x9dc5ada82b70b59eULL), /* 10^32 */
+ [0x39] = make_floatx80(0x40d3, 0xc2781f49ffcfa6d5ULL), /* 10^64 */
+ [0x3a] = make_floatx80(0x41a8, 0x93ba47c980e98ce0ULL), /* 10^128 */
+ [0x3b] = make_floatx80(0x4351, 0xaa7eebfb9df9de8eULL), /* 10^256 */
+ [0x3c] = make_floatx80(0x46a3, 0xe319a0aea60e91c7ULL), /* 10^512 */
+ [0x3d] = make_floatx80(0x4d48, 0xc976758681750c17ULL), /* 10^1024 */
+ [0x3e] = make_floatx80(0x5a92, 0x9e8b3b5dc53d5de5ULL), /* 10^2048 */
+ [0x3f] = make_floatx80(0x7525, 0xc46052028a20979bULL), /* 10^4096 */
+};
int32_t HELPER(reds32)(CPUM68KState *env, FPReg *val)
{
@@ -128,19 +158,85 @@ void HELPER(set_fpcr)(CPUM68KState *env, uint32_t val)
cpu_m68k_set_fpcr(env, val);
}
+#define PREC_BEGIN(prec) \
+ do { \
+ int old; \
+ old = get_floatx80_rounding_precision(&env->fp_status); \
+ set_floatx80_rounding_precision(prec, &env->fp_status) \
+
+#define PREC_END() \
+ set_floatx80_rounding_precision(old, &env->fp_status); \
+ } while (0)
+
+void HELPER(fsround)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_round(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdround)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_round(val->d, &env->fp_status);
+ PREC_END();
+}
+
void HELPER(fsqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
{
res->d = floatx80_sqrt(val->d, &env->fp_status);
}
+void HELPER(fssqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_sqrt(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdsqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_sqrt(val->d, &env->fp_status);
+ PREC_END();
+}
+
void HELPER(fabs)(CPUM68KState *env, FPReg *res, FPReg *val)
{
- res->d = floatx80_abs(val->d);
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+}
+
+void HELPER(fsabs)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdabs)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+ PREC_END();
}
-void HELPER(fchs)(CPUM68KState *env, FPReg *res, FPReg *val)
+void HELPER(fneg)(CPUM68KState *env, FPReg *res, FPReg *val)
{
- res->d = floatx80_chs(val->d);
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+}
+
+void HELPER(fsneg)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdneg)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+ PREC_END();
}
void HELPER(fadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
@@ -148,21 +244,105 @@ void HELPER(fadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
}
+void HELPER(fsadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
void HELPER(fsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
{
res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
}
+void HELPER(fssub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
void HELPER(fmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
{
res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
}
+void HELPER(fsmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsglmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ int rounding_mode = get_float_rounding_mode(&env->fp_status);
+ floatx80 a, b;
+
+ PREC_BEGIN(32);
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ a = floatx80_round(val0->d, &env->fp_status);
+ b = floatx80_round(val1->d, &env->fp_status);
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ res->d = floatx80_mul(a, b, &env->fp_status);
+ PREC_END();
+}
+
void HELPER(fdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
{
res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
}
+void HELPER(fsdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(32);
+ res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fddiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(64);
+ res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsgldiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ int rounding_mode = get_float_rounding_mode(&env->fp_status);
+ floatx80 a, b;
+
+ PREC_BEGIN(32);
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ a = floatx80_round(val1->d, &env->fp_status);
+ b = floatx80_round(val0->d, &env->fp_status);
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ res->d = floatx80_div(a, b, &env->fp_status);
+ PREC_END();
+}
+
static int float_comp_to_cc(int float_compare)
{
switch (float_compare) {
@@ -204,3 +384,127 @@ void HELPER(ftst)(CPUM68KState *env, FPReg *val)
}
env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | cc;
}
+
+void HELPER(fconst)(CPUM68KState *env, FPReg *val, uint32_t offset)
+{
+ val->d = fpu_rom[offset];
+}
+
+typedef int (*float_access)(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra);
+
+static uint32_t fmovem_predec(CPUM68KState *env, uint32_t addr, uint32_t mask,
+ float_access access)
+{
+ uintptr_t ra = GETPC();
+ int i, size;
+
+ for (i = 7; i >= 0; i--, mask <<= 1) {
+ if (mask & 0x80) {
+ size = access(env, addr, &env->fregs[i], ra);
+ if ((mask & 0xff) != 0x80) {
+ addr -= size;
+ }
+ }
+ }
+
+ return addr;
+}
+
+static uint32_t fmovem_postinc(CPUM68KState *env, uint32_t addr, uint32_t mask,
+ float_access access)
+{
+ uintptr_t ra = GETPC();
+ int i, size;
+
+ for (i = 0; i < 8; i++, mask <<= 1) {
+ if (mask & 0x80) {
+ size = access(env, addr, &env->fregs[i], ra);
+ addr += size;
+ }
+ }
+
+ return addr;
+}
+
+static int cpu_ld_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ uint32_t high;
+ uint64_t low;
+
+ high = cpu_ldl_data_ra(env, addr, ra);
+ low = cpu_ldq_data_ra(env, addr + 4, ra);
+
+ fp->l.upper = high >> 16;
+ fp->l.lower = low;
+
+ return 12;
+}
+
+static int cpu_st_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ cpu_stl_data_ra(env, addr, fp->l.upper << 16, ra);
+ cpu_stq_data_ra(env, addr + 4, fp->l.lower, ra);
+
+ return 12;
+}
+
+static int cpu_ld_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ uint64_t val;
+
+ val = cpu_ldq_data_ra(env, addr, ra);
+ fp->d = float64_to_floatx80(*(float64 *)&val, &env->fp_status);
+
+ return 8;
+}
+
+static int cpu_st_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ float64 val;
+
+ val = floatx80_to_float64(fp->d, &env->fp_status);
+ cpu_stq_data_ra(env, addr, *(uint64_t *)&val, ra);
+
+ return 8;
+}
+
+uint32_t HELPER(fmovemx_st_predec)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_predec(env, addr, mask, cpu_st_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemx_st_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_st_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemx_ld_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_ld_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemd_st_predec)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_predec(env, addr, mask, cpu_st_float64_ra);
+}
+
+uint32_t HELPER(fmovemd_st_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_st_float64_ra);
+}
+
+uint32_t HELPER(fmovemd_ld_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_ld_float64_ra);
+}
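fmovem_predec() and fmovem_postinc() above drive everything from an 8-bit register mask: both loops test bit 7 and shift the mask left each step, so the postincrement walk visits FP0 up to FP7 while the predecrement walk visits FP7 down to FP0, and the size returned by the access callback advances or rewinds the address. A small stand-alone sketch of that mask walk (hypothetical masks and addresses, 12-byte extended-precision slots as used by cpu_st_floatx80_ra(); none of this is QEMU code):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the loop in fmovem_postinc(): bit 7 selects FP0, registers are
 * visited FP0..FP7, and the address moves forward one slot per register. */
static uint32_t walk_postinc(uint32_t addr, uint32_t mask)
{
    for (int i = 0; i < 8; i++, mask <<= 1) {
        if (mask & 0x80) {
            printf("FP%d at 0x%08x\n", i, (unsigned)addr);
            addr += 12;
        }
    }
    return addr;
}

/* Mirror of fmovem_predec(): the loop runs FP7 down to FP0 and the address
 * is rewound after every register except the last one in the list. */
static uint32_t walk_predec(uint32_t addr, uint32_t mask)
{
    for (int i = 7; i >= 0; i--, mask <<= 1) {
        if (mask & 0x80) {
            printf("FP%d at 0x%08x\n", i, (unsigned)addr);
            if ((mask & 0xff) != 0x80) {   /* more registers still to come */
                addr -= 12;
            }
        }
    }
    return addr;
}

int main(void)
{
    /* Hypothetical masks: FP0/FP2/FP3 in each loop's own bit ordering. */
    walk_postinc(0x2000, 0xb0);   /* visits FP0, FP2, FP3 going up   */
    walk_predec(0x3000, 0x0d);    /* visits FP3, FP2, FP0 going down */
    return 0;
}
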
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
index 98cbf18..475a1f2 100644
--- a/target/m68k/helper.h
+++ b/target/m68k/helper.h
@@ -23,18 +23,43 @@ DEF_HELPER_2(redf32, f32, env, fp)
DEF_HELPER_2(redf64, f64, env, fp)
DEF_HELPER_2(reds32, s32, env, fp)
+DEF_HELPER_3(fsround, void, env, fp, fp)
+DEF_HELPER_3(fdround, void, env, fp, fp)
DEF_HELPER_3(firound, void, env, fp, fp)
DEF_HELPER_3(fitrunc, void, env, fp, fp)
DEF_HELPER_3(fsqrt, void, env, fp, fp)
+DEF_HELPER_3(fssqrt, void, env, fp, fp)
+DEF_HELPER_3(fdsqrt, void, env, fp, fp)
DEF_HELPER_3(fabs, void, env, fp, fp)
-DEF_HELPER_3(fchs, void, env, fp, fp)
+DEF_HELPER_3(fsabs, void, env, fp, fp)
+DEF_HELPER_3(fdabs, void, env, fp, fp)
+DEF_HELPER_3(fneg, void, env, fp, fp)
+DEF_HELPER_3(fsneg, void, env, fp, fp)
+DEF_HELPER_3(fdneg, void, env, fp, fp)
DEF_HELPER_4(fadd, void, env, fp, fp, fp)
+DEF_HELPER_4(fsadd, void, env, fp, fp, fp)
+DEF_HELPER_4(fdadd, void, env, fp, fp, fp)
DEF_HELPER_4(fsub, void, env, fp, fp, fp)
+DEF_HELPER_4(fssub, void, env, fp, fp, fp)
+DEF_HELPER_4(fdsub, void, env, fp, fp, fp)
DEF_HELPER_4(fmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fsmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fdmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fsglmul, void, env, fp, fp, fp)
DEF_HELPER_4(fdiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fsdiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fddiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fsgldiv, void, env, fp, fp, fp)
DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_RWG, void, env, fp, fp)
DEF_HELPER_FLAGS_2(set_fpcr, TCG_CALL_NO_RWG, void, env, i32)
DEF_HELPER_FLAGS_2(ftst, TCG_CALL_NO_RWG, void, env, fp)
+DEF_HELPER_3(fconst, void, env, fp, i32)
+DEF_HELPER_3(fmovemx_st_predec, i32, env, i32, i32)
+DEF_HELPER_3(fmovemx_st_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemx_ld_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_st_predec, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_st_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_ld_postinc, i32, env, i32, i32)
DEF_HELPER_3(mac_move, void, env, i32, i32)
DEF_HELPER_3(macmulf, i64, env, i32, i32)
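Each DEF_HELPER_* line above declares one of the helpers defined in fpu_helper.c: the numeric suffix counts the arguments after the return type, HELPER(name) pastes the helper_ prefix onto the definition, and translate.c invokes the generated gen_helper_name() wrapper. A toy illustration of the name-pasting half of that convention (the macro below is a stand-in, not QEMU's real helper headers):

#include <stdio.h>

/* Stand-in for QEMU's token-pasting HELPER() macro: HELPER(fsround)
 * becomes helper_fsround, the symbol that the generated
 * gen_helper_fsround() wrapper ends up calling from translated code. */
#define HELPER(name) helper_##name

/* Toy body: "round to single precision" on ordinary doubles. */
void HELPER(fsround)(double *res, const double *val)
{
    *res = (float)*val;
}

int main(void)
{
    double in = 1.00000000001, out;
    helper_fsround(&out, &in);   /* the pasted name is a normal C symbol */
    printf("%.17g\n", out);      /* prints 1: the excess precision is gone */
    return 0;
}
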
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 7aa0fdc..3a519b7 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -4505,23 +4505,93 @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
tcg_temp_free_i32(addr);
}
+static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
+ uint32_t insn, uint32_t ext)
+{
+ int opsize;
+ TCGv addr, tmp;
+ int mode = (ext >> 11) & 0x3;
+ int is_load = ((ext & 0x2000) == 0);
+
+ if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
+ opsize = OS_EXTENDED;
+ } else {
+ opsize = OS_DOUBLE; /* FIXME */
+ }
+
+ addr = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ tmp = tcg_temp_new();
+ if (mode & 0x1) {
+ /* Dynamic register list */
+ tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
+ } else {
+ /* Static register list */
+ tcg_gen_movi_i32(tmp, ext & 0xff);
+ }
+
+ if (!is_load && (mode & 2) == 0) {
+ /* predecrement addressing mode
+ * only available to store register to memory
+ */
+ if (opsize == OS_EXTENDED) {
+ gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
+ }
+ } else {
+ /* postincrement addressing mode */
+ if (opsize == OS_EXTENDED) {
+ if (is_load) {
+ gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
+ }
+ } else {
+ if (is_load) {
+ gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
+ }
+ }
+ }
+ if ((insn & 070) == 030 || (insn & 070) == 040) {
+ tcg_gen_mov_i32(AREG(insn, 0), tmp);
+ }
+ tcg_temp_free(tmp);
+}
+
/* ??? FP exceptions are not implemented. Most exceptions are deferred until
immediately before the next FP instruction is executed. */
DISAS_INSN(fpu)
{
uint16_t ext;
int opmode;
- TCGv tmp32;
int opsize;
TCGv_ptr cpu_src, cpu_dest;
ext = read_im16(env, s);
opmode = ext & 0x7f;
switch ((ext >> 13) & 7) {
- case 0: case 2:
+ case 0:
break;
case 1:
goto undef;
+ case 2:
+ if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
+ /* fmovecr */
+ TCGv rom_offset = tcg_const_i32(opmode);
+ cpu_dest = gen_fp_ptr(REG(ext, 7));
+ gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
+ tcg_temp_free_ptr(cpu_dest);
+ tcg_temp_free(rom_offset);
+ return;
+ }
+ break;
case 3: /* fmove out */
cpu_src = gen_fp_ptr(REG(ext, 7));
opsize = ext_opsize(ext, 10);
@@ -4537,36 +4607,10 @@ DISAS_INSN(fpu)
return;
case 6: /* fmovem */
case 7:
- {
- TCGv addr;
- TCGv_ptr fp;
- uint16_t mask;
- int i;
- if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
- goto undef;
- tmp32 = gen_lea(env, s, insn, OS_LONG);
- if (IS_NULL_QREG(tmp32)) {
- gen_addr_fault(s);
- return;
- }
- addr = tcg_temp_new_i32();
- tcg_gen_mov_i32(addr, tmp32);
- mask = 0x80;
- fp = tcg_temp_new_ptr();
- for (i = 0; i < 8; i++) {
- if (ext & mask) {
- tcg_gen_addi_ptr(fp, cpu_env,
- offsetof(CPUM68KState, fregs[i]));
- gen_ldst_fp(s, OS_DOUBLE, addr, fp,
- (ext & (1 << 13)) ? EA_STORE : EA_LOADS);
- if (ext & (mask - 1))
- tcg_gen_addi_i32(addr, addr, 8);
- }
- mask >>= 1;
- }
- tcg_temp_free_i32(addr);
- tcg_temp_free_ptr(fp);
+ if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
+ goto undef;
}
+ gen_op_fmovem(env, s, insn, ext);
return;
}
if (ext & (1 << 14)) {
@@ -4584,36 +4628,90 @@ DISAS_INSN(fpu)
}
cpu_dest = gen_fp_ptr(REG(ext, 7));
switch (opmode) {
- case 0: case 0x40: case 0x44: /* fmove */
+ case 0: /* fmove */
gen_fp_move(cpu_dest, cpu_src);
break;
+ case 0x40: /* fsmove */
+ gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x44: /* fdmove */
+ gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
+ break;
case 1: /* fint */
gen_helper_firound(cpu_env, cpu_dest, cpu_src);
break;
case 3: /* fintrz */
gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
break;
- case 4: case 0x41: case 0x45: /* fsqrt */
+ case 4: /* fsqrt */
gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
break;
- case 0x18: case 0x58: case 0x5c: /* fabs */
+ case 0x41: /* fssqrt */
+ gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x45: /* fdsqrt */
+ gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x18: /* fabs */
gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
break;
- case 0x1a: case 0x5a: case 0x5e: /* fneg */
- gen_helper_fchs(cpu_env, cpu_dest, cpu_src);
+ case 0x58: /* fsabs */
+ gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5c: /* fdabs */
+ gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
break;
- case 0x20: case 0x60: case 0x64: /* fdiv */
+ case 0x1a: /* fneg */
+ gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5a: /* fsneg */
+ gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5e: /* fdneg */
+ gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x20: /* fdiv */
gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
break;
- case 0x22: case 0x62: case 0x66: /* fadd */
+ case 0x60: /* fsdiv */
+ gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x64: /* fddiv */
+ gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x22: /* fadd */
gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
break;
- case 0x23: case 0x63: case 0x67: /* fmul */
+ case 0x62: /* fsadd */
+ gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x66: /* fdadd */
+ gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x23: /* fmul */
gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
break;
- case 0x28: case 0x68: case 0x6c: /* fsub */
+ case 0x63: /* fsmul */
+ gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x67: /* fdmul */
+ gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x24: /* fsgldiv */
+ gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x27: /* fsglmul */
+ gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x28: /* fsub */
gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
break;
+ case 0x68: /* fssub */
+ gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x6c: /* fdsub */
+ gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
case 0x38: /* fcmp */
gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
return;
@@ -4633,142 +4731,193 @@ undef:
disas_undef_fpu(env, s, insn);
}
-DISAS_INSN(fbcc)
+static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
- uint32_t offset;
- uint32_t addr;
- TCGLabel *l1;
- TCGv tmp, fpsr;
-
- addr = s->pc;
- offset = cpu_ldsw_code(env, s->pc);
- s->pc += 2;
- if (insn & (1 << 6)) {
- offset = (offset << 16) | read_im16(env, s);
- }
+ TCGv fpsr;
+ c->g1 = 1;
+ c->v2 = tcg_const_i32(0);
+ c->g2 = 0;
+ /* TODO: Raise BSUN exception. */
fpsr = tcg_temp_new();
gen_load_fcr(s, fpsr, M68K_FPSR);
- l1 = gen_new_label();
- /* TODO: Raise BSUN exception. */
- /* Jump to l1 if condition is true. */
- switch (insn & 0x3f) {
+ switch (cond) {
case 0: /* False */
case 16: /* Signaling False */
+ c->v1 = c->v2;
+ c->tcond = TCG_COND_NEVER;
break;
case 1: /* EQual Z */
case 17: /* Signaling EQual Z */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ c->tcond = TCG_COND_NE;
break;
case 2: /* Ordered Greater Than !(A || Z || N) */
case 18: /* Greater Than !(A || Z || N) */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr,
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr,
FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
+ c->tcond = TCG_COND_EQ;
break;
case 3: /* Ordered Greater than or Equal Z || !(A || N) */
case 19: /* Greater than or Equal Z || !(A || N) */
- assert(FPSR_CC_A == (FPSR_CC_N >> 3));
- tmp = tcg_temp_new();
- tcg_gen_shli_i32(tmp, fpsr, 3);
- tcg_gen_or_i32(tmp, tmp, fpsr);
- tcg_gen_xori_i32(tmp, tmp, FPSR_CC_N);
- tcg_gen_andi_i32(tmp, tmp, FPSR_CC_N | FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
+ tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
+ tcg_gen_or_i32(c->v1, c->v1, fpsr);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 4: /* Ordered Less Than !(!N || A || Z); */
case 20: /* Less Than !(!N || A || Z); */
- tmp = tcg_temp_new();
- tcg_gen_xori_i32(tmp, fpsr, FPSR_CC_N);
- tcg_gen_andi_i32(tmp, tmp, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
break;
case 5: /* Ordered Less than or Equal Z || (N && !A) */
case 21: /* Less than or Equal Z || (N && !A) */
- assert(FPSR_CC_A == (FPSR_CC_N >> 3));
- tmp = tcg_temp_new();
- tcg_gen_xori_i32(tmp, fpsr, FPSR_CC_A);
- tcg_gen_shli_i32(tmp, tmp, 3);
- tcg_gen_ori_i32(tmp, tmp, FPSR_CC_Z);
- tcg_gen_and_i32(tmp, tmp, fpsr);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
+ tcg_gen_andc_i32(c->v1, fpsr, c->v1);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 6: /* Ordered Greater or Less than !(A || Z) */
case 22: /* Greater or Less than !(A || Z) */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A | FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
break;
case 7: /* Ordered !A */
case 23: /* Greater, Less or Equal !A */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ c->tcond = TCG_COND_EQ;
break;
case 8: /* Unordered A */
case 24: /* Not Greater, Less or Equal A */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ c->tcond = TCG_COND_NE;
break;
case 9: /* Unordered or Equal A || Z */
case 25: /* Not Greater or Less then A || Z */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A | FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_NE;
break;
case 10: /* Unordered or Greater Than A || !(N || Z)) */
case 26: /* Not Less or Equal A || !(N || Z)) */
- assert(FPSR_CC_Z == (FPSR_CC_N >> 1));
- tmp = tcg_temp_new();
- tcg_gen_shli_i32(tmp, fpsr, 1);
- tcg_gen_or_i32(tmp, tmp, fpsr);
- tcg_gen_xori_i32(tmp, tmp, FPSR_CC_N);
- tcg_gen_andi_i32(tmp, tmp, FPSR_CC_N | FPSR_CC_A);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
+ tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
+ tcg_gen_or_i32(c->v1, c->v1, fpsr);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 11: /* Unordered or Greater or Equal A || Z || !N */
case 27: /* Not Less Than A || Z || !N */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- tcg_gen_xori_i32(tmp, tmp, FPSR_CC_N);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 12: /* Unordered or Less Than A || (N && !Z) */
case 28: /* Not Greater than or Equal A || (N && !Z) */
- assert(FPSR_CC_Z == (FPSR_CC_N >> 1));
- tmp = tcg_temp_new();
- tcg_gen_xori_i32(tmp, fpsr, FPSR_CC_Z);
- tcg_gen_shli_i32(tmp, tmp, 1);
- tcg_gen_ori_i32(tmp, tmp, FPSR_CC_A);
- tcg_gen_and_i32(tmp, tmp, fpsr);
- tcg_gen_andi_i32(tmp, tmp, FPSR_CC_A | FPSR_CC_N);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
+ tcg_gen_andc_i32(c->v1, fpsr, c->v1);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 13: /* Unordered or Less or Equal A || Z || N */
case 29: /* Not Greater Than A || Z || N */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
break;
case 14: /* Not Equal !Z */
case 30: /* Signaling Not Equal !Z */
- tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, fpsr, FPSR_CC_Z);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
break;
case 15: /* True */
case 31: /* Signaling True */
- tcg_gen_br(l1);
+ c->v1 = c->v2;
+ c->tcond = TCG_COND_ALWAYS;
break;
}
tcg_temp_free(fpsr);
+}
+
+static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
+{
+ DisasCompare c;
+
+ gen_fcc_cond(&c, s, cond);
+ tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
+ free_cond(&c);
+}
+
+DISAS_INSN(fbcc)
+{
+ uint32_t offset;
+ uint32_t base;
+ TCGLabel *l1;
+
+ base = s->pc;
+ offset = (int16_t)read_im16(env, s);
+ if (insn & (1 << 6)) {
+ offset = (offset << 16) | read_im16(env, s);
+ }
+
+ l1 = gen_new_label();
+ update_cc_op(s);
+ gen_fjmpcc(s, insn & 0x3f, l1);
gen_jmp_tb(s, 0, s->pc);
gen_set_label(l1);
- gen_jmp_tb(s, 1, addr + offset);
+ gen_jmp_tb(s, 1, base + offset);
+}
+
+DISAS_INSN(fscc)
+{
+ DisasCompare c;
+ int cond;
+ TCGv tmp;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+ cond = ext & 0x3f;
+ gen_fcc_cond(&c, s, cond);
+
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
+ free_cond(&c);
+
+ tcg_gen_neg_i32(tmp, tmp);
+ DEST_EA(env, insn, OS_BYTE, tmp, NULL);
+ tcg_temp_free(tmp);
}
DISAS_INSN(frestore)
@@ -5349,6 +5498,7 @@ void register_m68k_insns (CPUM68KState *env)
INSN(frestore, f340, ffc0, CF_FPU);
INSN(fsave, f300, ffc0, CF_FPU);
INSN(fpu, f200, ffc0, FPU);
+ INSN(fscc, f240, ffc0, FPU);
INSN(fbcc, f280, ff80, FPU);
INSN(frestore, f340, ffc0, FPU);
INSN(fsave, f300, ffc0, FPU);
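
For reference, the new DISAS_INSN(fscc) above evaluates the condition with tcg_gen_setcond_i32 (yielding 0 or 1) and then negates it, so the byte stored through DEST_EA is 0x00 when the condition is false and 0xff when it is true, as the Scc/FScc family requires. The same trick in plain C, outside of TCG:

#include <stdio.h>
#include <stdint.h>

/* FScc writes 0x00 (condition false) or 0xff (condition true): negating the
 * 0/1 setcond result copies the truth value into every bit, which is what
 * tcg_gen_setcond_i32 followed by tcg_gen_neg_i32 does in DISAS_INSN(fscc). */
static uint8_t scc_byte(int cond_true)
{
    int32_t bit = (cond_true != 0);   /* setcond: 0 or 1       */
    return (uint8_t)(-bit);           /* neg:     0x00 or 0xff */
}

int main(void)
{
    printf("false -> 0x%02x\n", scc_byte(0));
    printf("true  -> 0x%02x\n", scc_byte(1));
    return 0;
}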