author     Aurelien Jarno <aurelien@aurel32.net>    2011-01-06 15:38:18 +0100
committer  Aurelien Jarno <aurelien@aurel32.net>    2011-01-06 16:10:48 +0100
commit     dd94ad96e51457d8d9562a73659734b559d2f4b1 (patch)
tree       fb5c979dd1ebf19dd201ab3eb8ff12cc0d8167b5 /target-ppc
parent     23979dc5411befabe9049e37075b2b6320debc4e (diff)
target-ppc: remove PRECISE_EMULATION define
The PRECISE_EMULATION define is "hardcoded" to one in target-ppc/exec.h and is
not something easily tunable. Remove it along with the non-precise emulation
code, as the latter doesn't make a noticeable difference in speed. People
wanting a speed improvement should use softfloat-native instead.

Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Diffstat (limited to 'target-ppc')
-rw-r--r--   target-ppc/exec.h        3
-rw-r--r--   target-ppc/op_helper.c  58
2 files changed, 11 insertions(+), 50 deletions(-)
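
For orientation before the diff: a sketch of what helper_fadd looks like once this patch is applied, reconstructed from the hunks below. The signature comes from the hunk headers, and the softfloat calls (float64_is_signaling_nan, float64_add, fload_invalid_op_excp, env->fp_status) all appear in the diff itself; the CPU_DoubleU declaration, the body of the sNaN branch (which falls between the two hunks) and the exact indentation are assumptions.

/* Sketch only (not part of the patch): helper_fadd after this change.
 * CPU_DoubleU and the sNaN branch body are assumptions based on the
 * surrounding file; everything else is taken from the hunks below. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;            /* assumed: union with .ll / .d views */

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition: raise the invalid-operation exception */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);   /* assumed, mirrors helper_frsp */
    } else {
        /* (unchanged infinity handling between the two hunks elided) */
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

helper_fsub, helper_fmul, helper_fdiv and the fused multiply-add helpers get the same treatment: the #if USE_PRECISE_EMULATION / #else fallback pair goes away and only the exception-flag-precise softfloat path remains.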
diff --git a/target-ppc/exec.h b/target-ppc/exec.h
index 44cc5e9..4688ef5 100644
--- a/target-ppc/exec.h
+++ b/target-ppc/exec.h
@@ -26,9 +26,6 @@
 #include "cpu.h"
 #include "exec-all.h"
 
-/* Precise emulation is needed to correctly emulate exception flags */
-#define USE_PRECISE_EMULATION 1
-
 register struct CPUPPCState *env asm(AREG0);
 
 #if !defined(CONFIG_USER_ONLY)
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index 5ded1c1..31520ab 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -974,7 +974,7 @@ uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
 
     farg1.ll = arg1;
     farg2.ll = arg2;
-#if USE_PRECISE_EMULATION
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d))) {
         /* sNaN addition */
@@ -986,9 +986,7 @@ uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
     } else {
         farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
     }
-#else
-    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
-#endif
+
     return farg1.ll;
 }
 
@@ -999,8 +997,7 @@ uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
 
     farg1.ll = arg1;
     farg2.ll = arg2;
-#if USE_PRECISE_EMULATION
-{
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d))) {
         /* sNaN subtraction */
@@ -1012,10 +1009,7 @@ uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
     } else {
         farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
     }
-}
-#else
-    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
-#endif
+
     return farg1.ll;
 }
 
@@ -1026,7 +1020,7 @@ uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
 
     farg1.ll = arg1;
     farg2.ll = arg2;
-#if USE_PRECISE_EMULATION
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d))) {
         /* sNaN multiplication */
@@ -1038,9 +1032,7 @@ uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
     } else {
         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
     }
-#else
-    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
-#endif
+
     return farg1.ll;
 }
 
@@ -1051,7 +1043,7 @@ uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
 
     farg1.ll = arg1;
    farg2.ll = arg2;
-#if USE_PRECISE_EMULATION
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d))) {
         /* sNaN division */
@@ -1065,9 +1057,7 @@ uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
     } else {
         farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
     }
-#else
-    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
-#endif
+
     return farg1.ll;
 }
 
@@ -1116,12 +1106,10 @@ uint64_t helper_fctiw (uint64_t arg)
         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
     } else {
         farg.ll = float64_to_int32(farg.d, &env->fp_status);
-#if USE_PRECISE_EMULATION
         /* XXX: higher bits are not supposed to be significant.
          * to make tests easier, return the same as a real PowerPC 750
          */
         farg.ll |= 0xFFF80000ULL << 32;
-#endif
     }
     return farg.ll;
 }
@@ -1140,12 +1128,10 @@ uint64_t helper_fctiwz (uint64_t arg)
         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
     } else {
         farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
-#if USE_PRECISE_EMULATION
         /* XXX: higher bits are not supposed to be significant.
          * to make tests easier, return the same as a real PowerPC 750
          */
         farg.ll |= 0xFFF80000ULL << 32;
-#endif
     }
     return farg.ll;
 }
@@ -1245,7 +1231,7 @@ uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
     farg1.ll = arg1;
     farg2.ll = arg2;
     farg3.ll = arg3;
-#if USE_PRECISE_EMULATION
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d) ||
                  float64_is_signaling_nan(farg3.d))) {
@@ -1277,10 +1263,7 @@ uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         farg1.d = (farg1.d * farg2.d) + farg3.d;
 #endif
     }
-#else
-    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
-    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
-#endif
+
     return farg1.ll;
 }
 
@@ -1292,7 +1275,7 @@ uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
     farg1.ll = arg1;
     farg2.ll = arg2;
     farg3.ll = arg3;
-#if USE_PRECISE_EMULATION
+
     if (unlikely(float64_is_signaling_nan(farg1.d) ||
                  float64_is_signaling_nan(farg2.d) ||
                  float64_is_signaling_nan(farg3.d))) {
@@ -1324,10 +1307,6 @@ uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         farg1.d = (farg1.d * farg2.d) - farg3.d;
 #endif
     }
-#else
-    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
-    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
-#endif
     return farg1.ll;
 }
 
@@ -1350,7 +1329,6 @@ uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         /* Multiplication of zero by infinity */
         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
     } else {
-#if USE_PRECISE_EMULATION
 #ifdef FLOAT128
         /* This is the way the PowerPC specification defines it */
         float128 ft0_128, ft1_128;
@@ -1371,10 +1349,6 @@ uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         /* This is OK on x86 hosts */
         farg1.d = (farg1.d * farg2.d) + farg3.d;
 #endif
-#else
-        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
-        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
-#endif
         if (likely(!float64_is_quiet_nan(farg1.d)))
             farg1.d = float64_chs(farg1.d);
     }
@@ -1400,7 +1374,6 @@ uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         /* Multiplication of zero by infinity */
         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
     } else {
-#if USE_PRECISE_EMULATION
 #ifdef FLOAT128
         /* This is the way the PowerPC specification defines it */
         float128 ft0_128, ft1_128;
@@ -1421,10 +1394,6 @@ uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
         /* This is OK on x86 hosts */
         farg1.d = (farg1.d * farg2.d) - farg3.d;
 #endif
-#else
-        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
-        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
-#endif
         if (likely(!float64_is_quiet_nan(farg1.d)))
             farg1.d = float64_chs(farg1.d);
     }
@@ -1438,7 +1407,6 @@ uint64_t helper_frsp (uint64_t arg)
     float32 f32;
     farg.ll = arg;
 
-#if USE_PRECISE_EMULATION
     if (unlikely(float64_is_signaling_nan(farg.d))) {
         /* sNaN square root */
         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
@@ -1446,10 +1414,6 @@ uint64_t helper_frsp (uint64_t arg)
         f32 = float64_to_float32(farg.d, &env->fp_status);
         farg.d = float32_to_float64(f32, &env->fp_status);
     }
-#else
-    f32 = float64_to_float32(farg.d, &env->fp_status);
-    farg.d = float32_to_float64(f32, &env->fp_status);
-#endif
     return farg.ll;
 }
 
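
A side note on helper_fctiw and helper_fctiwz: the patch keeps the OR of the upper 32 bits described by the retained XXX comment, so the high word of the result still matches a real PowerPC 750. Below is a small stand-alone illustration of the resulting value, assuming the int32 conversion yields 42; the helper itself is not reproduced here.

/* Illustration only (not QEMU code): the value helper_fctiw would hand back
 * for an input that converts to 42, given the high-bit OR kept by the patch. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t ll = 42;                  /* assumed float64_to_int32() result */
    ll |= 0xFFF80000ULL << 32;         /* same OR as in helper_fctiw/fctiwz */
    printf("0x%016" PRIx64 "\n", ll);  /* prints 0xfff800000000002a */
    return 0;
}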