author    Peter Maydell <peter.maydell@linaro.org>  2020-08-24 19:55:23 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2020-08-24 19:55:23 +0100
commit    44423107e7b5731ef40c5c8632a5bad8b49d0838 (patch)
tree      3d1dfbb992e59318a1bf775948e2eb86ac4483aa /tests
parent    30aa19446d82358a30eac3b556b4d6641e00b7c1 (diff)
parent    c621b4142bf1ff8c663811c10bd1628481e494a6 (diff)
Merge remote-tracking branch 'remotes/xtensa/tags/20200821-xtensa' into staging
target/xtensa updates for 5.2:

- add NMI support;
- add DFPU option implementation;
- update FPU tests to support both FPU2000 and DFPU;
- add example cores with FPU2000 and DFPU.

# gpg: Signature made Fri 21 Aug 2020 21:09:37 BST
# gpg:                using RSA key 2B67854B98E5327DCDEB17D851F9CC91F83FA044
# gpg:                issuer "jcmvbkbc@gmail.com"
# gpg: Good signature from "Max Filippov <filippov@cadence.com>" [unknown]
# gpg:                 aka "Max Filippov <max.filippov@cogentembedded.com>" [full]
# gpg:                 aka "Max Filippov <jcmvbkbc@gmail.com>" [full]
# Primary key fingerprint: 2B67 854B 98E5 327D CDEB 17D8 51F9 CC91 F83F A044

* remotes/xtensa/tags/20200821-xtensa: (24 commits)
  target/xtensa: import DSP3400 core
  target/xtensa: import de233_fpu core
  tests/tcg/xtensa: add DFP0 arithmetic tests
  tests/tcg/xtensa: test double precision load/store
  tests/tcg/xtensa: add fp0 div and sqrt tests
  tests/tcg/xtensa: update test_lsc for DFPU
  tests/tcg/xtensa: update test_fp1 for DFPU
  tests/tcg/xtensa: update test_fp0_conv for DFPU
  tests/tcg/xtensa: expand madd tests
  tests/tcg/xtensa: update test_fp0_arith for DFPU
  tests/tcg/xtensa: fix test execution on ISS
  target/xtensa: implement FPU division and square root
  target/xtensa: add DFPU registers and opcodes
  target/xtensa: add DFPU option
  target/xtensa: don't access BR regfile directly
  target/xtensa: move FSR/FCR register accessors
  target/xtensa: rename FPU2000 translators and helpers
  target/xtensa: support copying registers up to 64 bits wide
  target/xtensa: add geometry to xtensa_get_regfile_by_name
  softfloat: add xtensa specialization for pickNaNMulAdd
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'tests')
-rw-r--r--  tests/tcg/xtensa/fpu.h             | 142
-rw-r--r--  tests/tcg/xtensa/macros.inc        |  10
-rw-r--r--  tests/tcg/xtensa/test_dfp0_arith.S | 162
-rw-r--r--  tests/tcg/xtensa/test_fp0_arith.S  | 282
-rw-r--r--  tests/tcg/xtensa/test_fp0_conv.S   | 299
-rw-r--r--  tests/tcg/xtensa/test_fp0_div.S    |  82
-rw-r--r--  tests/tcg/xtensa/test_fp0_sqrt.S   |  76
-rw-r--r--  tests/tcg/xtensa/test_fp1.S        |  62
-rw-r--r--  tests/tcg/xtensa/test_lsc.S        | 170
9 files changed, 998 insertions, 287 deletions
diff --git a/tests/tcg/xtensa/fpu.h b/tests/tcg/xtensa/fpu.h
new file mode 100644
index 0000000..42e3217
--- /dev/null
+++ b/tests/tcg/xtensa/fpu.h
@@ -0,0 +1,142 @@
+#if XCHAL_HAVE_DFP || XCHAL_HAVE_FP_DIV
+#define DFPU 1
+#else
+#define DFPU 0
+#endif
+
+#define FCR_RM_NEAREST 0
+#define FCR_RM_TRUNC 1
+#define FCR_RM_CEIL 2
+#define FCR_RM_FLOOR 3
+
+#define FSR__ 0x00000000
+#define FSR_I 0x00000080
+#define FSR_U 0x00000100
+#define FSR_O 0x00000200
+#define FSR_Z 0x00000400
+#define FSR_V 0x00000800
+
+#define FSR_UI (FSR_U | FSR_I)
+#define FSR_OI (FSR_O | FSR_I)
+
+#define F32_0 0x00000000
+#define F32_0_5 0x3f000000
+#define F32_1 0x3f800000
+#define F32_MAX 0x7f7fffff
+#define F32_PINF 0x7f800000
+#define F32_NINF 0xff800000
+
+#define F32_DNAN 0x7fc00000
+#define F32_SNAN(v) (0x7f800000 | (v))
+#define F32_QNAN(v) (0x7fc00000 | (v))
+
+#define F32_MINUS 0x80000000
+
+#define F64_0 0x0000000000000000
+#define F64_MIN_NORM 0x0010000000000000
+#define F64_1 0x3ff0000000000000
+#define F64_MAX_2 0x7fe0000000000000
+#define F64_MAX 0x7fefffffffffffff
+#define F64_PINF 0x7ff0000000000000
+#define F64_NINF 0xfff0000000000000
+
+#define F64_DNAN 0x7ff8000000000000
+#define F64_SNAN(v) (0x7ff0000000000000 | (v))
+#define F64_QNAN(v) (0x7ff8000000000000 | (v))
+
+#define F64_MINUS 0x8000000000000000
+
+.macro test_op1_rm op, fr0, fr1, v0, r, sr
+ movi a2, 0
+ wur a2, fsr
+ movfp \fr0, \v0
+ \op \fr1, \fr0
+ check_res \fr1, \r, \sr
+.endm
+
+.macro test_op2_rm op, fr0, fr1, fr2, v0, v1, r, sr
+ movi a2, 0
+ wur a2, fsr
+ movfp \fr0, \v0
+ movfp \fr1, \v1
+ \op \fr2, \fr0, \fr1
+ check_res \fr2, \r, \sr
+.endm
+
+.macro test_op3_rm op, fr0, fr1, fr2, fr3, v0, v1, v2, r, sr
+ movi a2, 0
+ wur a2, fsr
+ movfp \fr0, \v0
+ movfp \fr1, \v1
+ movfp \fr2, \v2
+ \op \fr0, \fr1, \fr2
+ check_res \fr3, \r, \sr
+.endm
+
+.macro test_op1_ex op, fr0, fr1, v0, rm, r, sr
+ movi a2, \rm
+ wur a2, fcr
+ test_op1_rm \op, \fr0, \fr1, \v0, \r, \sr
+ movi a2, (\rm) | 0x7c
+ wur a2, fcr
+ test_op1_rm \op, \fr0, \fr1, \v0, \r, \sr
+.endm
+
+.macro test_op2_ex op, fr0, fr1, fr2, v0, v1, rm, r, sr
+ movi a2, \rm
+ wur a2, fcr
+ test_op2_rm \op, \fr0, \fr1, \fr2, \v0, \v1, \r, \sr
+ movi a2, (\rm) | 0x7c
+ wur a2, fcr
+ test_op2_rm \op, \fr0, \fr1, \fr2, \v0, \v1, \r, \sr
+.endm
+
+.macro test_op3_ex op, fr0, fr1, fr2, fr3, v0, v1, v2, rm, r, sr
+ movi a2, \rm
+ wur a2, fcr
+ test_op3_rm \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, \r, \sr
+ movi a2, (\rm) | 0x7c
+ wur a2, fcr
+ test_op3_rm \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, \r, \sr
+.endm
+
+.macro test_op1 op, fr0, fr1, v0, r0, r1, r2, r3, sr0, sr1, sr2, sr3
+ test_op1_ex \op, \fr0, \fr1, \v0, 0, \r0, \sr0
+ test_op1_ex \op, \fr0, \fr1, \v0, 1, \r1, \sr1
+ test_op1_ex \op, \fr0, \fr1, \v0, 2, \r2, \sr2
+ test_op1_ex \op, \fr0, \fr1, \v0, 3, \r3, \sr3
+.endm
+
+.macro test_op2 op, fr0, fr1, fr2, v0, v1, r0, r1, r2, r3, sr0, sr1, sr2, sr3
+ test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 0, \r0, \sr0
+ test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 1, \r1, \sr1
+ test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 2, \r2, \sr2
+ test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 3, \r3, \sr3
+.endm
+
+.macro test_op3 op, fr0, fr1, fr2, fr3, v0, v1, v2, r0, r1, r2, r3, sr0, sr1, sr2, sr3
+ test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 0, \r0, \sr0
+ test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 1, \r1, \sr1
+ test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 2, \r2, \sr2
+ test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 3, \r3, \sr3
+.endm
+
+.macro test_op2_cpe op
+ set_vector kernel, 2f
+ movi a2, 0
+ wsr a2, cpenable
+1:
+ \op f2, f0, f1
+ test_fail
+2:
+ rsr a2, excvaddr
+ movi a3, 1b
+ assert eq, a2, a3
+ rsr a2, exccause
+ movi a3, 32
+ assert eq, a2, a3
+
+ set_vector kernel, 0
+ movi a2, 1
+ wsr a2, cpenable
+.endm
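The test_opN macros above run each operation once per rounding mode, and test_opN_ex runs every case twice: once with only the rounding mode in FCR, and once with the exception-enable bits (0x7c) also set, checking that enables change neither results nor flags. A hand expansion of one rounding-mode leg of a test_op2 invocation, assuming the movfp/check_res macros each including test file defines for its float width (a sketch, not part of the patch):

    /* test_op2 add.s, f0, f1, f2, 0x3fc00000, 0x34400000, ...
       expanded through test_op2_ex/test_op2_rm for rm = FCR_RM_NEAREST */
    movi      a2, FCR_RM_NEAREST
    wur       a2, fcr                 /* select rounding mode */
    movi      a2, 0
    wur       a2, fsr                 /* clear accumulated exception flags */
    movfp     f0, 0x3fc00000          /* 1.5f */
    movfp     f1, 0x34400000          /* 1.5 * 2^-23, i.e. 1.5 ulp of 1.5f */
    add.s     f2, f0, f1
    check_res f2, 0x3fc00002, FSR_I   /* halfway case: ties-to-even, inexact set */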
diff --git a/tests/tcg/xtensa/macros.inc b/tests/tcg/xtensa/macros.inc
index aa8f95b..f88937c 100644
--- a/tests/tcg/xtensa/macros.inc
+++ b/tests/tcg/xtensa/macros.inc
@@ -3,7 +3,7 @@
.macro test_suite name
.data
status: .word result
-result: .space 256
+result: .space 1024
.text
.global main
.align 4
@@ -25,9 +25,9 @@ main:
movi a3, 0
beqz a2, 2f
1:
- l8ui a1, a0, 0
+ l32i a1, a0, 0
or a3, a3, a1
- addi a0, a0, 1
+ addi a0, a0, 4
addi a2, a2, -1
bnez a2, 1b
2:
@@ -65,7 +65,7 @@ test_\name:
reset_ps
movi a2, status
l32i a3, a2, 0
- addi a3, a3, 1
+ addi a3, a3, 4
s32i a3, a2, 0
.endm
@@ -78,7 +78,7 @@ test_\name:
movi a2, status
l32i a2, a2, 0
movi a3, 1
- s8i a3, a2, 0
+ s32i a3, a2, 0
#ifdef DEBUG
print failed
#endif
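The macros.inc hunks widen each test's status slot from one byte to one 32-bit word: the result array grows from 256 to 1024 bytes, the pass/fail store becomes s32i, the status pointer now advances by 4 per test, and the final loop in main ORs words instead of bytes. Capacity is unchanged (1024/4 = 256 slots, the same as 256 one-byte slots); the change is purely a switch to naturally aligned word accesses.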
diff --git a/tests/tcg/xtensa/test_dfp0_arith.S b/tests/tcg/xtensa/test_dfp0_arith.S
new file mode 100644
index 0000000..53bf812
--- /dev/null
+++ b/tests/tcg/xtensa/test_dfp0_arith.S
@@ -0,0 +1,162 @@
+#include "macros.inc"
+#include "fpu.h"
+
+test_suite fp0_arith
+
+#if XCHAL_HAVE_DFP
+
+.macro movfp fr, v
+ movi a2, ((\v) >> 32) & 0xffffffff
+ movi a3, ((\v) & 0xffffffff)
+ wfrd \fr, a2, a3
+.endm
+
+.macro check_res fr, r, sr
+ rfrd a2, \fr
+ dump a2
+ movi a3, ((\r) >> 32) & 0xffffffff
+ assert eq, a2, a3
+ rfr a2, \fr
+ dump a2
+ movi a3, ((\r) & 0xffffffff)
+ assert eq, a2, a3
+ rur a2, fsr
+ movi a3, \sr
+ assert eq, a2, a3
+.endm
+
+test add_d
+ movi a2, 1
+ wsr a2, cpenable
+
+ /* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT */
+ test_op2 add.d, f6, f7, f8, F64_MAX, F64_MAX, \
+ F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
+ FSR_OI, FSR_OI, FSR_OI, FSR_OI
+test_end
+
+test add_d_inf
+ /* 1 + +inf = +inf */
+ test_op2 add.d, f6, f7, f8, F64_1, F64_PINF, \
+ F64_PINF, F64_PINF, F64_PINF, F64_PINF, \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* +inf + -inf = default NaN */
+ test_op2 add.d, f0, f1, f2, F64_PINF, F64_NINF, \
+ F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+
+test add_d_nan_dfpu
+ /* 1 + QNaN = QNaN */
+ test_op2 add.d, f9, f10, f11, F64_1, F64_QNAN(1), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ /* 1 + SNaN = QNaN */
+ test_op2 add.d, f12, f13, f14, F64_1, F64_SNAN(1), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+
+ /* SNaN1 + SNaN2 = QNaN2 */
+ test_op2 add.d, f15, f0, f1, F64_SNAN(1), F64_SNAN(2), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* QNaN1 + SNaN2 = QNaN2 */
+ test_op2 add.d, f5, f6, f7, F64_QNAN(1), F64_SNAN(2), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* SNaN1 + QNaN2 = QNaN2 */
+ test_op2 add.d, f8, f9, f10, F64_SNAN(1), F64_QNAN(2), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+
+test sub_d
+ /* norm - norm = denorm */
+ test_op2 sub.d, f6, f7, f8, F64_MIN_NORM | 1, F64_MIN_NORM, \
+ 0x00000001, 0x00000001, 0x00000001, 0x00000001, \
+ FSR__, FSR__, FSR__, FSR__
+test_end
+
+test mul_d
+ test_op2 mul.d, f0, f1, f2, F64_1 | 1, F64_1 | 1, \
+ F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
+ FSR_I, FSR_I, FSR_I, FSR_I
+ /* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT */
+ test_op2 mul.d, f6, f7, f8, F64_MAX_2, F64_MAX_2, \
+ F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
+ FSR_OI, FSR_OI, FSR_OI, FSR_OI
+ /* min norm * min norm = 0/denorm */
+ test_op2 mul.d, f6, f7, f8, F64_MIN_NORM, F64_MIN_NORM, \
+ F64_0, F64_0, 0x00000001, F64_0, \
+ FSR_UI, FSR_UI, FSR_UI, FSR_UI
+ /* inf * 0 = default NaN */
+ test_op2 mul.d, f6, f7, f8, F64_PINF, F64_0, \
+ F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+
+test madd_d
+ test_op3 madd.d, f0, f1, f2, f0, F64_0, F64_1 | 1, F64_1 | 1, \
+ F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
+ FSR_I, FSR_I, FSR_I, FSR_I
+test_end
+
+test madd_d_precision
+ test_op3 madd.d, f0, f1, f2, f0, \
+ F64_MINUS | F64_1 | 2, F64_1 | 1, F64_1 | 1, \
+ 0x3970000000000000, 0x3970000000000000, 0x3970000000000000, 0x3970000000000000, \
+ FSR__, FSR__, FSR__, FSR__
+test_end
+
+test madd_d_nan_dfpu
+ /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_1, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_1, \
+ F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_1, F64_QNAN(3), \
+ F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_1, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_QNAN(3), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_QNAN(3), \
+ F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_QNAN(3), \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* inf * 0 = default NaN */
+ test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_PINF, F64_0, \
+ F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + SNaN1 = QNaN1 */
+ test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_PINF, F64_0, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + QNaN1 = QNaN1 */
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_PINF, F64_0, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+
+ /* madd/msub SNaN turns to QNaN and sets Invalid flag */
+ test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_1, F64_1, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_SNAN(2), F64_1, \
+ F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+
+#endif
+
+test_suite_end
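In the madd_d tests, madd.d fr, fs, ft accumulates fr += fs * ft, so NaN1/NaN2/NaN3 in the priority comment are the accumulator and the two multiplicands, in that order. One test_op3_rm body, hand-expanded as a sketch (movfp/check_res are the double-precision versions defined at the top of this file):

    movi      a2, 0
    wur       a2, fsr
    movfp     f0, F64_QNAN(1)          /* NaN1: accumulator */
    movfp     f1, F64_1                /* fs */
    movfp     f2, F64_QNAN(3)          /* NaN3: second multiplicand */
    madd.d    f0, f1, f2               /* f0 += f1 * f2 */
    check_res f0, F64_QNAN(1), FSR__   /* accumulator NaN wins, no flags */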
diff --git a/tests/tcg/xtensa/test_fp0_arith.S b/tests/tcg/xtensa/test_fp0_arith.S
index 253d033..7eefc1d 100644
--- a/tests/tcg/xtensa/test_fp0_arith.S
+++ b/tests/tcg/xtensa/test_fp0_arith.S
@@ -1,4 +1,5 @@
#include "macros.inc"
+#include "fpu.h"
test_suite fp0_arith
@@ -9,84 +10,18 @@ test_suite fp0_arith
wfr \fr, a2
.endm
-.macro check_res fr, r
+.macro check_res fr, r, sr
rfr a2, \fr
dump a2
movi a3, \r
assert eq, a2, a3
rur a2, fsr
- assert eqi, a2, 0
-.endm
-
-.macro test_op2_rm op, fr0, fr1, fr2, v0, v1, r
- movi a2, 0
- wur a2, fsr
- movfp \fr0, \v0
- movfp \fr1, \v1
- \op \fr2, \fr0, \fr1
- check_res \fr2, \r
-.endm
-
-.macro test_op3_rm op, fr0, fr1, fr2, fr3, v0, v1, v2, r
- movi a2, 0
- wur a2, fsr
- movfp \fr0, \v0
- movfp \fr1, \v1
- movfp \fr2, \v2
- \op \fr0, \fr1, \fr2
- check_res \fr3, \r
-.endm
-
-.macro test_op2_ex op, fr0, fr1, fr2, v0, v1, rm, r
- movi a2, \rm
- wur a2, fcr
- test_op2_rm \op, \fr0, \fr1, \fr2, \v0, \v1, \r
- movi a2, (\rm) | 0x7c
- wur a2, fcr
- test_op2_rm \op, \fr0, \fr1, \fr2, \v0, \v1, \r
-.endm
-
-.macro test_op3_ex op, fr0, fr1, fr2, fr3, v0, v1, v2, rm, r
- movi a2, \rm
- wur a2, fcr
- test_op3_rm \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, \r
- movi a2, (\rm) | 0x7c
- wur a2, fcr
- test_op3_rm \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, \r
-.endm
-
-.macro test_op2 op, fr0, fr1, fr2, v0, v1, r0, r1, r2, r3
- test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 0, \r0
- test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 1, \r1
- test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 2, \r2
- test_op2_ex \op, \fr0, \fr1, \fr2, \v0, \v1, 3, \r3
-.endm
-
-.macro test_op3 op, fr0, fr1, fr2, fr3, v0, v1, v2, r0, r1, r2, r3
- test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 0, \r0
- test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 1, \r1
- test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 2, \r2
- test_op3_ex \op, \fr0, \fr1, \fr2, \fr3, \v0, \v1, \v2, 3, \r3
-.endm
-
-.macro test_op2_cpe op
- set_vector kernel, 2f
- movi a2, 0
- wsr a2, cpenable
-1:
- \op f2, f0, f1
- test_fail
-2:
- rsr a2, excvaddr
- movi a3, 1b
- assert eq, a2, a3
- rsr a2, exccause
- movi a3, 32
+#if DFPU
+ movi a3, \sr
assert eq, a2, a3
-
- set_vector kernel, 0
- movi a2, 1
- wsr a2, cpenable
+#else
+ assert eqi, a2, 0
+#endif
.endm
test add_s
@@ -94,78 +29,231 @@ test add_s
wsr a2, cpenable
test_op2 add.s, f0, f1, f2, 0x3fc00000, 0x34400000, \
- 0x3fc00002, 0x3fc00001, 0x3fc00002, 0x3fc00001
+ 0x3fc00002, 0x3fc00001, 0x3fc00002, 0x3fc00001, \
+ FSR_I, FSR_I, FSR_I, FSR_I
test_op2 add.s, f3, f4, f5, 0x3fc00000, 0x34a00000, \
- 0x3fc00002, 0x3fc00002, 0x3fc00003, 0x3fc00002
+ 0x3fc00002, 0x3fc00002, 0x3fc00003, 0x3fc00002, \
+ FSR_I, FSR_I, FSR_I, FSR_I
/* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT */
test_op2 add.s, f6, f7, f8, 0x7f7fffff, 0x7f7fffff, \
- 0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff
+ 0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff, \
+ FSR_OI, FSR_OI, FSR_OI, FSR_OI
test_end
test add_s_inf
/* 1 + +inf = +inf */
test_op2 add.s, f6, f7, f8, 0x3fc00000, 0x7f800000, \
- 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, \
+ FSR__, FSR__, FSR__, FSR__
/* +inf + -inf = default NaN */
test_op2 add.s, f0, f1, f2, 0x7f800000, 0xff800000, \
- 0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000
+ 0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000, \
+ FSR_V, FSR_V, FSR_V, FSR_V
test_end
-test add_s_nan
- /* 1 + NaN = NaN */
+#if DFPU
+test add_s_nan_dfpu
+ /* 1 + QNaN = QNaN */
test_op2 add.s, f9, f10, f11, 0x3fc00000, 0x7fc00001, \
- 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR__, FSR__, FSR__, FSR__
+ /* 1 + SNaN = QNaN */
test_op2 add.s, f12, f13, f14, 0x3fc00000, 0x7f800001, \
- 0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR_V, FSR_V, FSR_V, FSR_V
- /* NaN1 + NaN2 = NaN1 */
+ /* SNaN1 + SNaN2 = QNaN2 */
+ test_op2 add.s, f15, f0, f1, 0x7f800001, 0x7fbfffff, \
+ 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ test_op2 add.s, f2, f3, f4, 0x7fbfffff, 0x7f800001, \
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* QNaN1 + SNaN2 = QNaN2 */
+ test_op2 add.s, f5, f6, f7, 0x7fc00001, 0x7fbfffff, \
+ 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* SNaN1 + QNaN2 = QNaN2 */
+ test_op2 add.s, f8, f9, f10, 0x7fbfffff, 0x7fc00001, \
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+#else
+test add_s_nan_fpu2k
+ /* 1 + QNaN = QNaN */
+ test_op2 add.s, f9, f10, f11, 0x3fc00000, 0x7fc00001, \
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR__, FSR__, FSR__, FSR__
+ /* 1 + SNaN = SNaN */
+ test_op2 add.s, f12, f13, f14, 0x3fc00000, 0x7f800001, \
+ 0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001, \
+ FSR__, FSR__, FSR__, FSR__
+ /* SNaN1 + SNaN2 = SNaN1 */
test_op2 add.s, f15, f0, f1, 0x7f800001, 0x7fbfffff, \
- 0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001
+ 0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001, \
+ FSR__, FSR__, FSR__, FSR__
test_op2 add.s, f2, f3, f4, 0x7fbfffff, 0x7f800001, \
- 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff
+ 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, \
+ FSR__, FSR__, FSR__, FSR__
+ /* QNaN1 + SNaN2 = QNaN1 */
test_op2 add.s, f5, f6, f7, 0x7fc00001, 0x7fbfffff, \
- 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001
+ 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
+ FSR__, FSR__, FSR__, FSR__
+ /* SNaN1 + QNaN2 = SNaN1 */
test_op2 add.s, f8, f9, f10, 0x7fbfffff, 0x7fc00001, \
- 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff
+ 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, \
+ FSR__, FSR__, FSR__, FSR__
test_end
+#endif
test sub_s
test_op2 sub.s, f0, f1, f0, 0x3f800001, 0x33800000, \
- 0x3f800000, 0x3f800000, 0x3f800001, 0x3f800000
+ 0x3f800000, 0x3f800000, 0x3f800001, 0x3f800000, \
+ FSR_I, FSR_I, FSR_I, FSR_I
test_op2 sub.s, f0, f1, f1, 0x3f800002, 0x33800000, \
- 0x3f800002, 0x3f800001, 0x3f800002, 0x3f800001
+ 0x3f800002, 0x3f800001, 0x3f800002, 0x3f800001, \
+ FSR_I, FSR_I, FSR_I, FSR_I
/* norm - norm = denorm */
test_op2 sub.s, f6, f7, f8, 0x00800001, 0x00800000, \
- 0x00000001, 0x00000001, 0x00000001, 0x00000001
+ 0x00000001, 0x00000001, 0x00000001, 0x00000001, \
+ FSR__, FSR__, FSR__, FSR__
test_end
test mul_s
test_op2 mul.s, f0, f1, f2, 0x3f800001, 0x3f800001, \
- 0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002
-
+ 0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002, \
+ FSR_I, FSR_I, FSR_I, FSR_I
/* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT */
test_op2 mul.s, f6, f7, f8, 0x7f000000, 0x7f000000, \
- 0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff
+ 0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff, \
+ FSR_OI, FSR_OI, FSR_OI, FSR_OI
/* min norm * min norm = 0/denorm */
test_op2 mul.s, f6, f7, f8, 0x00800001, 0x00800000, \
- 0x00000000, 0x00000000, 0x00000001, 0x00000000
-
+ 0x00000000, 0x00000000, 0x00000001, 0x00000000, \
+ FSR_UI, FSR_UI, FSR_UI, FSR_UI
/* inf * 0 = default NaN */
test_op2 mul.s, f6, f7, f8, 0x7f800000, 0x00000000, \
- 0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000
+ 0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000, \
+ FSR_V, FSR_V, FSR_V, FSR_V
test_end
test madd_s
test_op3 madd.s, f0, f1, f2, f0, 0, 0x3f800001, 0x3f800001, \
- 0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002
+ 0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002, \
+ FSR_I, FSR_I, FSR_I, FSR_I
+test_end
+
+test madd_s_precision
+ test_op3 madd.s, f0, f1, f2, f0, 0xbf800002, 0x3f800001, 0x3f800001, \
+ 0x28800000, 0x28800000, 0x28800000, 0x28800000, \
+ FSR__, FSR__, FSR__, FSR__
+test_end
+
+#if DFPU
+test madd_s_nan_dfpu
+ /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* inf * 0 = default NaN */
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + SNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + QNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+
+ /* madd/msub SNaN turns to QNaN and sets Invalid flag */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+#else
+test madd_s_nan_fpu2k
+ /* FPU2000 madd/msub NaN1, NaN2, NaN3 priority: NaN2, NaN3, NaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* inf * 0 = default NaN */
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR__, FSR__, FSR__, FSR__
+ /* inf * 0 + SNaN1 = SNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
+ F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ /* inf * 0 + QNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* madd/msub SNaN is preserved */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
+ F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
+ F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
test_end
+#endif
test msub_s
test_op3 msub.s, f0, f1, f2, f0, 0x3f800000, 0x3f800001, 0x3f800001, \
- 0xb4800000, 0xb4800000, 0xb4800000, 0xb4800001
+ 0xb4800000, 0xb4800000, 0xb4800000, 0xb4800001, \
+ FSR_I, FSR_I, FSR_I, FSR_I
test_end
#endif
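The *_dfpu/*_fpu2k split captures the NaN-handling difference this series models: the DFPU quiets signaling NaNs (setting mantissa bit 22, so 0x7f800001 becomes 0x7fc00001) and raises Invalid, while FPU2000 propagates SNaNs unchanged with FSR left clear, and the two also disagree on which NaN operand wins. One datapoint from the tables above, as a sketch:

    movi    a2, 0
    wur     a2, fsr
    movfp   f0, 0x3fc00000         /* 1.5f */
    movfp   f1, F32_SNAN(1)        /* 0x7f800001 */
    add.s   f2, f0, f1
    /* DFPU:    f2 == F32_QNAN(1) (0x7fc00001), FSR == FSR_V
       FPU2000: f2 == F32_SNAN(1) (0x7f800001), FSR == 0 */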
diff --git a/tests/tcg/xtensa/test_fp0_conv.S b/tests/tcg/xtensa/test_fp0_conv.S
index 147e3d5..cfee6e5 100644
--- a/tests/tcg/xtensa/test_fp0_conv.S
+++ b/tests/tcg/xtensa/test_fp0_conv.S
@@ -1,4 +1,5 @@
#include "macros.inc"
+#include "fpu.h"
test_suite fp0_conv
@@ -9,7 +10,7 @@ test_suite fp0_conv
wfr \fr, a2
.endm
-.macro test_ftoi_ex op, r0, fr0, v, c, r
+.macro test_ftoi_ex op, r0, fr0, v, c, r, sr
movi a2, 0
wur a2, fsr
movfp \fr0, \v
@@ -18,20 +19,25 @@ test_suite fp0_conv
movi a3, \r
assert eq, \r0, a3
rur a2, fsr
+#if DFPU
+ movi a3, \sr
+ assert eq, a2, a3
+#else
assert eqi, a2, 0
+#endif
.endm
-.macro test_ftoi op, r0, fr0, v, c, r
+.macro test_ftoi op, r0, fr0, v, c, r, sr
movi a2, 0
wur a2, fcr
- test_ftoi_ex \op, \r0, \fr0, \v, \c, \r
+ test_ftoi_ex \op, \r0, \fr0, \v, \c, \r, \sr
movi a2, 0x7c
wur a2, fcr
- test_ftoi_ex \op, \r0, \fr0, \v, \c, \r
+ test_ftoi_ex \op, \r0, \fr0, \v, \c, \r, \sr
.endm
-.macro test_itof_ex op, fr0, ar0, v, c, r
+.macro test_itof_ex op, fr0, ar0, v, c, r, sr
movi a2, 0
wur a2, fsr
movi \ar0, \v
@@ -42,23 +48,28 @@ test_suite fp0_conv
movi a3, \r
assert eq, a2, a3
rur a2, fsr
+#if DFPU
+ movi a3, \sr
+ assert eq, a2, a3
+#else
assert eqi, a2, 0
+#endif
.endm
-.macro test_itof_rm op, fr0, ar0, v, c, rm, r
+.macro test_itof_rm op, fr0, ar0, v, c, rm, r, sr
movi a2, \rm
wur a2, fcr
- test_itof_ex \op, \fr0, \ar0, \v, \c, \r
+ test_itof_ex \op, \fr0, \ar0, \v, \c, \r, \sr
movi a2, (\rm) | 0x7c
wur a2, fcr
- test_itof_ex \op, \fr0, \ar0, \v, \c, \r
+ test_itof_ex \op, \fr0, \ar0, \v, \c, \r, \sr
.endm
-.macro test_itof op, fr0, ar0, v, c, r0, r1, r2, r3
- test_itof_rm \op, \fr0, \ar0, \v, \c, 0, \r0
- test_itof_rm \op, \fr0, \ar0, \v, \c, 1, \r1
- test_itof_rm \op, \fr0, \ar0, \v, \c, 2, \r2
- test_itof_rm \op, \fr0, \ar0, \v, \c, 3, \r3
+.macro test_itof op, fr0, ar0, v, c, r0, r1, r2, r3, sr
+ test_itof_rm \op, \fr0, \ar0, \v, \c, 0, \r0, \sr
+ test_itof_rm \op, \fr0, \ar0, \v, \c, 1, \r1, \sr
+ test_itof_rm \op, \fr0, \ar0, \v, \c, 2, \r2, \sr
+ test_itof_rm \op, \fr0, \ar0, \v, \c, 3, \r3, \sr
.endm
test round_s
@@ -66,237 +77,237 @@ test round_s
wsr a2, cpenable
/* NaN */
- test_ftoi round.s, a2, f0, 0xffc00001, 0, 0x7fffffff
- test_ftoi round.s, a2, f0, 0xff800001, 0, 0x7fffffff
+ test_ftoi round.s, a2, f0, 0xffc00001, 0, 0x7fffffff, FSR_V
+ test_ftoi round.s, a2, f0, 0xff800001, 0, 0x7fffffff, FSR_V
/* -inf */
- test_ftoi round.s, a2, f0, 0xff800000, 0, 0x80000000
+ test_ftoi round.s, a2, f0, 0xff800000, 0, 0x80000000, FSR_V
/* negative overflow */
- test_ftoi round.s, a2, f0, 0xceffffff, 1, 0x80000000
- test_ftoi round.s, a2, f0, 0xcf000000, 0, 0x80000000
- test_ftoi round.s, a2, f0, 0xceffffff, 0, 0x80000080
+ test_ftoi round.s, a2, f0, 0xceffffff, 1, 0x80000000, FSR_V
+ test_ftoi round.s, a2, f0, 0xcf000000, 0, 0x80000000, FSR__
+ test_ftoi round.s, a2, f0, 0xceffffff, 0, 0x80000080, FSR__
/* negative */
- test_ftoi round.s, a2, f0, 0xbfa00000, 1, -2 /* -1.25 * 2 */
- test_ftoi round.s, a2, f0, 0xbfc00000, 0, -2 /* -1.5 */
- test_ftoi round.s, a2, f0, 0xbf800000, 1, -2 /* -1 * 2 */
- test_ftoi round.s, a2, f0, 0xbf800000, 0, -1 /* -1 */
- test_ftoi round.s, a2, f0, 0xbf400000, 0, -1 /* -0.75 */
- test_ftoi round.s, a2, f0, 0xbf000000, 0, 0 /* -0.5 */
+ test_ftoi round.s, a2, f0, 0xbfa00000, 1, -2, FSR_I /* -1.25 * 2 */
+ test_ftoi round.s, a2, f0, 0xbfc00000, 0, -2, FSR_I /* -1.5 */
+ test_ftoi round.s, a2, f0, 0xbf800000, 1, -2, FSR__ /* -1 * 2 */
+ test_ftoi round.s, a2, f0, 0xbf800000, 0, -1, FSR__ /* -1 */
+ test_ftoi round.s, a2, f0, 0xbf400000, 0, -1, FSR_I /* -0.75 */
+ test_ftoi round.s, a2, f0, 0xbf000000, 0, 0, FSR_I /* -0.5 */
/* positive */
- test_ftoi round.s, a2, f0, 0x3f000000, 0, 0 /* 0.5 */
- test_ftoi round.s, a2, f0, 0x3f400000, 0, 1 /* 0.75 */
- test_ftoi round.s, a2, f0, 0x3f800000, 0, 1 /* 1 */
- test_ftoi round.s, a2, f0, 0x3f800000, 1, 2 /* 1 * 2 */
- test_ftoi round.s, a2, f0, 0x3fc00000, 0, 2 /* 1.5 */
- test_ftoi round.s, a2, f0, 0x3fa00000, 1, 2 /* 1.25 * 2 */
+ test_ftoi round.s, a2, f0, 0x3f000000, 0, 0, FSR_I /* 0.5 */
+ test_ftoi round.s, a2, f0, 0x3f400000, 0, 1, FSR_I /* 0.75 */
+ test_ftoi round.s, a2, f0, 0x3f800000, 0, 1, FSR__ /* 1 */
+ test_ftoi round.s, a2, f0, 0x3f800000, 1, 2, FSR__ /* 1 * 2 */
+ test_ftoi round.s, a2, f0, 0x3fc00000, 0, 2, FSR_I /* 1.5 */
+ test_ftoi round.s, a2, f0, 0x3fa00000, 1, 2, FSR_I /* 1.25 * 2 */
/* positive overflow */
- test_ftoi round.s, a2, f0, 0x4effffff, 0, 0x7fffff80
- test_ftoi round.s, a2, f0, 0x4f000000, 0, 0x7fffffff
- test_ftoi round.s, a2, f0, 0x4effffff, 1, 0x7fffffff
+ test_ftoi round.s, a2, f0, 0x4effffff, 0, 0x7fffff80, FSR__
+ test_ftoi round.s, a2, f0, 0x4f000000, 0, 0x7fffffff, FSR_V
+ test_ftoi round.s, a2, f0, 0x4effffff, 1, 0x7fffffff, FSR_V
/* +inf */
- test_ftoi round.s, a2, f0, 0x7f800000, 0, 0x7fffffff
+ test_ftoi round.s, a2, f0, 0x7f800000, 0, 0x7fffffff, FSR_V
/* NaN */
- test_ftoi round.s, a2, f0, 0x7f800001, 0, 0x7fffffff
- test_ftoi round.s, a2, f0, 0x7fc00000, 0, 0x7fffffff
+ test_ftoi round.s, a2, f0, 0x7f800001, 0, 0x7fffffff, FSR_V
+ test_ftoi round.s, a2, f0, 0x7fc00000, 0, 0x7fffffff, FSR_V
test_end
test trunc_s
/* NaN */
- test_ftoi trunc.s, a2, f0, 0xffc00001, 0, 0x7fffffff
- test_ftoi trunc.s, a2, f0, 0xff800001, 0, 0x7fffffff
+ test_ftoi trunc.s, a2, f0, 0xffc00001, 0, 0x7fffffff, FSR_V
+ test_ftoi trunc.s, a2, f0, 0xff800001, 0, 0x7fffffff, FSR_V
/* -inf */
- test_ftoi trunc.s, a2, f0, 0xff800000, 0, 0x80000000
+ test_ftoi trunc.s, a2, f0, 0xff800000, 0, 0x80000000, FSR_V
/* negative overflow */
- test_ftoi trunc.s, a2, f0, 0xceffffff, 1, 0x80000000
- test_ftoi trunc.s, a2, f0, 0xcf000000, 0, 0x80000000
- test_ftoi trunc.s, a2, f0, 0xceffffff, 0, 0x80000080
+ test_ftoi trunc.s, a2, f0, 0xceffffff, 1, 0x80000000, FSR_V
+ test_ftoi trunc.s, a2, f0, 0xcf000000, 0, 0x80000000, FSR__
+ test_ftoi trunc.s, a2, f0, 0xceffffff, 0, 0x80000080, FSR__
/* negative */
- test_ftoi trunc.s, a2, f0, 0xbfa00000, 1, -2 /* -1.25 * 2 */
- test_ftoi trunc.s, a2, f0, 0xbfc00000, 0, -1 /* -1.5 */
- test_ftoi trunc.s, a2, f0, 0xbf800000, 1, -2 /* -1 * 2 */
- test_ftoi trunc.s, a2, f0, 0xbf800000, 0, -1 /* -1 */
- test_ftoi trunc.s, a2, f0, 0xbf400000, 0, 0 /* -0.75 */
- test_ftoi trunc.s, a2, f0, 0xbf000000, 0, 0 /* -0.5 */
+ test_ftoi trunc.s, a2, f0, 0xbfa00000, 1, -2, FSR_I /* -1.25 * 2 */
+ test_ftoi trunc.s, a2, f0, 0xbfc00000, 0, -1, FSR_I /* -1.5 */
+ test_ftoi trunc.s, a2, f0, 0xbf800000, 1, -2, FSR__ /* -1 * 2 */
+ test_ftoi trunc.s, a2, f0, 0xbf800000, 0, -1, FSR__ /* -1 */
+ test_ftoi trunc.s, a2, f0, 0xbf400000, 0, 0, FSR_I /* -0.75 */
+ test_ftoi trunc.s, a2, f0, 0xbf000000, 0, 0, FSR_I /* -0.5 */
/* positive */
- test_ftoi trunc.s, a2, f0, 0x3f000000, 0, 0 /* 0.5 */
- test_ftoi trunc.s, a2, f0, 0x3f400000, 0, 0 /* 0.75 */
- test_ftoi trunc.s, a2, f0, 0x3f800000, 0, 1 /* 1 */
- test_ftoi trunc.s, a2, f0, 0x3f800000, 1, 2 /* 1 * 2 */
- test_ftoi trunc.s, a2, f0, 0x3fc00000, 0, 1 /* 1.5 */
- test_ftoi trunc.s, a2, f0, 0x3fa00000, 1, 2 /* 1.25 * 2 */
+ test_ftoi trunc.s, a2, f0, 0x3f000000, 0, 0, FSR_I /* 0.5 */
+ test_ftoi trunc.s, a2, f0, 0x3f400000, 0, 0, FSR_I /* 0.75 */
+ test_ftoi trunc.s, a2, f0, 0x3f800000, 0, 1, FSR__ /* 1 */
+ test_ftoi trunc.s, a2, f0, 0x3f800000, 1, 2, FSR__ /* 1 * 2 */
+ test_ftoi trunc.s, a2, f0, 0x3fc00000, 0, 1, FSR_I /* 1.5 */
+ test_ftoi trunc.s, a2, f0, 0x3fa00000, 1, 2, FSR_I /* 1.25 * 2 */
/* positive overflow */
- test_ftoi trunc.s, a2, f0, 0x4effffff, 0, 0x7fffff80
- test_ftoi trunc.s, a2, f0, 0x4f000000, 0, 0x7fffffff
- test_ftoi trunc.s, a2, f0, 0x4effffff, 1, 0x7fffffff
+ test_ftoi trunc.s, a2, f0, 0x4effffff, 0, 0x7fffff80, FSR__
+ test_ftoi trunc.s, a2, f0, 0x4f000000, 0, 0x7fffffff, FSR_V
+ test_ftoi trunc.s, a2, f0, 0x4effffff, 1, 0x7fffffff, FSR_V
/* +inf */
- test_ftoi trunc.s, a2, f0, 0x7f800000, 0, 0x7fffffff
+ test_ftoi trunc.s, a2, f0, 0x7f800000, 0, 0x7fffffff, FSR_V
/* NaN */
- test_ftoi trunc.s, a2, f0, 0x7f800001, 0, 0x7fffffff
- test_ftoi trunc.s, a2, f0, 0x7fc00000, 0, 0x7fffffff
+ test_ftoi trunc.s, a2, f0, 0x7f800001, 0, 0x7fffffff, FSR_V
+ test_ftoi trunc.s, a2, f0, 0x7fc00000, 0, 0x7fffffff, FSR_V
test_end
test floor_s
/* NaN */
- test_ftoi floor.s, a2, f0, 0xffc00001, 0, 0x7fffffff
- test_ftoi floor.s, a2, f0, 0xff800001, 0, 0x7fffffff
+ test_ftoi floor.s, a2, f0, 0xffc00001, 0, 0x7fffffff, FSR_V
+ test_ftoi floor.s, a2, f0, 0xff800001, 0, 0x7fffffff, FSR_V
/* -inf */
- test_ftoi floor.s, a2, f0, 0xff800000, 0, 0x80000000
+ test_ftoi floor.s, a2, f0, 0xff800000, 0, 0x80000000, FSR_V
/* negative overflow */
- test_ftoi floor.s, a2, f0, 0xceffffff, 1, 0x80000000
- test_ftoi floor.s, a2, f0, 0xcf000000, 0, 0x80000000
- test_ftoi floor.s, a2, f0, 0xceffffff, 0, 0x80000080
+ test_ftoi floor.s, a2, f0, 0xceffffff, 1, 0x80000000, FSR_V
+ test_ftoi floor.s, a2, f0, 0xcf000000, 0, 0x80000000, FSR__
+ test_ftoi floor.s, a2, f0, 0xceffffff, 0, 0x80000080, FSR__
/* negative */
- test_ftoi floor.s, a2, f0, 0xbfa00000, 1, -3 /* -1.25 * 2 */
- test_ftoi floor.s, a2, f0, 0xbfc00000, 0, -2 /* -1.5 */
- test_ftoi floor.s, a2, f0, 0xbf800000, 1, -2 /* -1 * 2 */
- test_ftoi floor.s, a2, f0, 0xbf800000, 0, -1 /* -1 */
- test_ftoi floor.s, a2, f0, 0xbf400000, 0, -1 /* -0.75 */
- test_ftoi floor.s, a2, f0, 0xbf000000, 0, -1 /* -0.5 */
+ test_ftoi floor.s, a2, f0, 0xbfa00000, 1, -3, FSR_I /* -1.25 * 2 */
+ test_ftoi floor.s, a2, f0, 0xbfc00000, 0, -2, FSR_I /* -1.5 */
+ test_ftoi floor.s, a2, f0, 0xbf800000, 1, -2, FSR__ /* -1 * 2 */
+ test_ftoi floor.s, a2, f0, 0xbf800000, 0, -1, FSR__ /* -1 */
+ test_ftoi floor.s, a2, f0, 0xbf400000, 0, -1, FSR_I /* -0.75 */
+ test_ftoi floor.s, a2, f0, 0xbf000000, 0, -1, FSR_I /* -0.5 */
/* positive */
- test_ftoi floor.s, a2, f0, 0x3f000000, 0, 0 /* 0.5 */
- test_ftoi floor.s, a2, f0, 0x3f400000, 0, 0 /* 0.75 */
- test_ftoi floor.s, a2, f0, 0x3f800000, 0, 1 /* 1 */
- test_ftoi floor.s, a2, f0, 0x3f800000, 1, 2 /* 1 * 2 */
- test_ftoi floor.s, a2, f0, 0x3fc00000, 0, 1 /* 1.5 */
- test_ftoi floor.s, a2, f0, 0x3fa00000, 1, 2 /* 1.25 * 2 */
+ test_ftoi floor.s, a2, f0, 0x3f000000, 0, 0, FSR_I /* 0.5 */
+ test_ftoi floor.s, a2, f0, 0x3f400000, 0, 0, FSR_I /* 0.75 */
+ test_ftoi floor.s, a2, f0, 0x3f800000, 0, 1, FSR__ /* 1 */
+ test_ftoi floor.s, a2, f0, 0x3f800000, 1, 2, FSR__ /* 1 * 2 */
+ test_ftoi floor.s, a2, f0, 0x3fc00000, 0, 1, FSR_I /* 1.5 */
+ test_ftoi floor.s, a2, f0, 0x3fa00000, 1, 2, FSR_I /* 1.25 * 2 */
/* positive overflow */
- test_ftoi floor.s, a2, f0, 0x4effffff, 0, 0x7fffff80
- test_ftoi floor.s, a2, f0, 0x4f000000, 0, 0x7fffffff
- test_ftoi floor.s, a2, f0, 0x4effffff, 1, 0x7fffffff
+ test_ftoi floor.s, a2, f0, 0x4effffff, 0, 0x7fffff80, FSR__
+ test_ftoi floor.s, a2, f0, 0x4f000000, 0, 0x7fffffff, FSR_V
+ test_ftoi floor.s, a2, f0, 0x4effffff, 1, 0x7fffffff, FSR_V
/* +inf */
- test_ftoi floor.s, a2, f0, 0x7f800000, 0, 0x7fffffff
+ test_ftoi floor.s, a2, f0, 0x7f800000, 0, 0x7fffffff, FSR_V
/* NaN */
- test_ftoi floor.s, a2, f0, 0x7f800001, 0, 0x7fffffff
- test_ftoi floor.s, a2, f0, 0x7fc00000, 0, 0x7fffffff
+ test_ftoi floor.s, a2, f0, 0x7f800001, 0, 0x7fffffff, FSR_V
+ test_ftoi floor.s, a2, f0, 0x7fc00000, 0, 0x7fffffff, FSR_V
test_end
test ceil_s
/* NaN */
- test_ftoi ceil.s, a2, f0, 0xffc00001, 0, 0x7fffffff
- test_ftoi ceil.s, a2, f0, 0xff800001, 0, 0x7fffffff
+ test_ftoi ceil.s, a2, f0, 0xffc00001, 0, 0x7fffffff, FSR_V
+ test_ftoi ceil.s, a2, f0, 0xff800001, 0, 0x7fffffff, FSR_V
/* -inf */
- test_ftoi ceil.s, a2, f0, 0xff800000, 0, 0x80000000
+ test_ftoi ceil.s, a2, f0, 0xff800000, 0, 0x80000000, FSR_V
/* negative overflow */
- test_ftoi ceil.s, a2, f0, 0xceffffff, 1, 0x80000000
- test_ftoi ceil.s, a2, f0, 0xcf000000, 0, 0x80000000
- test_ftoi ceil.s, a2, f0, 0xceffffff, 0, 0x80000080
+ test_ftoi ceil.s, a2, f0, 0xceffffff, 1, 0x80000000, FSR_V
+ test_ftoi ceil.s, a2, f0, 0xcf000000, 0, 0x80000000, FSR__
+ test_ftoi ceil.s, a2, f0, 0xceffffff, 0, 0x80000080, FSR__
/* negative */
- test_ftoi ceil.s, a2, f0, 0xbfa00000, 1, -2 /* -1.25 * 2 */
- test_ftoi ceil.s, a2, f0, 0xbfc00000, 0, -1 /* -1.5 */
- test_ftoi ceil.s, a2, f0, 0xbf800000, 1, -2 /* -1 * 2 */
- test_ftoi ceil.s, a2, f0, 0xbf800000, 0, -1 /* -1 */
- test_ftoi ceil.s, a2, f0, 0xbf400000, 0, 0 /* -0.75 */
- test_ftoi ceil.s, a2, f0, 0xbf000000, 0, 0 /* -0.5 */
+ test_ftoi ceil.s, a2, f0, 0xbfa00000, 1, -2, FSR_I /* -1.25 * 2 */
+ test_ftoi ceil.s, a2, f0, 0xbfc00000, 0, -1, FSR_I /* -1.5 */
+ test_ftoi ceil.s, a2, f0, 0xbf800000, 1, -2, FSR__ /* -1 * 2 */
+ test_ftoi ceil.s, a2, f0, 0xbf800000, 0, -1, FSR__ /* -1 */
+ test_ftoi ceil.s, a2, f0, 0xbf400000, 0, 0, FSR_I /* -0.75 */
+ test_ftoi ceil.s, a2, f0, 0xbf000000, 0, 0, FSR_I /* -0.5 */
/* positive */
- test_ftoi ceil.s, a2, f0, 0x3f000000, 0, 1 /* 0.5 */
- test_ftoi ceil.s, a2, f0, 0x3f400000, 0, 1 /* 0.75 */
- test_ftoi ceil.s, a2, f0, 0x3f800000, 0, 1 /* 1 */
- test_ftoi ceil.s, a2, f0, 0x3f800000, 1, 2 /* 1 * 2 */
- test_ftoi ceil.s, a2, f0, 0x3fc00000, 0, 2 /* 1.5 */
- test_ftoi ceil.s, a2, f0, 0x3fa00000, 1, 3 /* 1.25 * 2 */
+ test_ftoi ceil.s, a2, f0, 0x3f000000, 0, 1, FSR_I /* 0.5 */
+ test_ftoi ceil.s, a2, f0, 0x3f400000, 0, 1, FSR_I /* 0.75 */
+ test_ftoi ceil.s, a2, f0, 0x3f800000, 0, 1, FSR__ /* 1 */
+ test_ftoi ceil.s, a2, f0, 0x3f800000, 1, 2, FSR__ /* 1 * 2 */
+ test_ftoi ceil.s, a2, f0, 0x3fc00000, 0, 2, FSR_I /* 1.5 */
+ test_ftoi ceil.s, a2, f0, 0x3fa00000, 1, 3, FSR_I /* 1.25 * 2 */
/* positive overflow */
- test_ftoi ceil.s, a2, f0, 0x4effffff, 0, 0x7fffff80
- test_ftoi ceil.s, a2, f0, 0x4f000000, 0, 0x7fffffff
- test_ftoi ceil.s, a2, f0, 0x4effffff, 1, 0x7fffffff
+ test_ftoi ceil.s, a2, f0, 0x4effffff, 0, 0x7fffff80, FSR__
+ test_ftoi ceil.s, a2, f0, 0x4f000000, 0, 0x7fffffff, FSR_V
+ test_ftoi ceil.s, a2, f0, 0x4effffff, 1, 0x7fffffff, FSR_V
/* +inf */
- test_ftoi ceil.s, a2, f0, 0x7f800000, 0, 0x7fffffff
+ test_ftoi ceil.s, a2, f0, 0x7f800000, 0, 0x7fffffff, FSR_V
/* NaN */
- test_ftoi ceil.s, a2, f0, 0x7f800001, 0, 0x7fffffff
- test_ftoi ceil.s, a2, f0, 0x7fc00000, 0, 0x7fffffff
+ test_ftoi ceil.s, a2, f0, 0x7f800001, 0, 0x7fffffff, FSR_V
+ test_ftoi ceil.s, a2, f0, 0x7fc00000, 0, 0x7fffffff, FSR_V
test_end
test utrunc_s
/* NaN */
- test_ftoi utrunc.s, a2, f0, 0xffc00001, 0, 0xffffffff
- test_ftoi utrunc.s, a2, f0, 0xff800001, 0, 0xffffffff
+ test_ftoi utrunc.s, a2, f0, 0xffc00001, 0, 0xffffffff, FSR_V
+ test_ftoi utrunc.s, a2, f0, 0xff800001, 0, 0xffffffff, FSR_V
/* -inf */
- test_ftoi utrunc.s, a2, f0, 0xff800000, 0, 0x80000000
+ test_ftoi utrunc.s, a2, f0, 0xff800000, 0, 0x80000000, FSR_V
/* negative overflow */
- test_ftoi utrunc.s, a2, f0, 0xceffffff, 1, 0x80000000
- test_ftoi utrunc.s, a2, f0, 0xcf000000, 0, 0x80000000
- test_ftoi utrunc.s, a2, f0, 0xceffffff, 0, 0x80000080
+ test_ftoi utrunc.s, a2, f0, 0xceffffff, 1, 0x80000000, FSR_V
+ test_ftoi utrunc.s, a2, f0, 0xcf000000, 0, 0x80000000, FSR_V
+ test_ftoi utrunc.s, a2, f0, 0xceffffff, 0, 0x80000080, FSR_V
/* negative */
- test_ftoi utrunc.s, a2, f0, 0xbfa00000, 1, -2 /* -1.25 * 2 */
- test_ftoi utrunc.s, a2, f0, 0xbfc00000, 0, -1 /* -1.5 */
- test_ftoi utrunc.s, a2, f0, 0xbf800000, 1, -2 /* -1 * 2 */
- test_ftoi utrunc.s, a2, f0, 0xbf800000, 0, -1 /* -1 */
- test_ftoi utrunc.s, a2, f0, 0xbf400000, 0, 0 /* -0.75 */
- test_ftoi utrunc.s, a2, f0, 0xbf000000, 0, 0 /* -0.5 */
+ test_ftoi utrunc.s, a2, f0, 0xbfa00000, 1, -2, FSR_V /* -1.25 * 2 */
+ test_ftoi utrunc.s, a2, f0, 0xbfc00000, 0, -1, FSR_V /* -1.5 */
+ test_ftoi utrunc.s, a2, f0, 0xbf800000, 1, -2, FSR_V /* -1 * 2 */
+ test_ftoi utrunc.s, a2, f0, 0xbf800000, 0, -1, FSR_V /* -1 */
+ test_ftoi utrunc.s, a2, f0, 0xbf400000, 0, 0, FSR_I /* -0.75 */
+ test_ftoi utrunc.s, a2, f0, 0xbf000000, 0, 0, FSR_I /* -0.5 */
/* positive */
- test_ftoi utrunc.s, a2, f0, 0x3f000000, 0, 0 /* 0.5 */
- test_ftoi utrunc.s, a2, f0, 0x3f400000, 0, 0 /* 0.75 */
- test_ftoi utrunc.s, a2, f0, 0x3f800000, 0, 1 /* 1 */
- test_ftoi utrunc.s, a2, f0, 0x3f800000, 1, 2 /* 1 * 2 */
- test_ftoi utrunc.s, a2, f0, 0x3fc00000, 0, 1 /* 1.5 */
- test_ftoi utrunc.s, a2, f0, 0x3fa00000, 1, 2 /* 1.25 * 2 */
+ test_ftoi utrunc.s, a2, f0, 0x3f000000, 0, 0, FSR_I /* 0.5 */
+ test_ftoi utrunc.s, a2, f0, 0x3f400000, 0, 0, FSR_I /* 0.75 */
+ test_ftoi utrunc.s, a2, f0, 0x3f800000, 0, 1, FSR__ /* 1 */
+ test_ftoi utrunc.s, a2, f0, 0x3f800000, 1, 2, FSR__ /* 1 * 2 */
+ test_ftoi utrunc.s, a2, f0, 0x3fc00000, 0, 1, FSR_I /* 1.5 */
+ test_ftoi utrunc.s, a2, f0, 0x3fa00000, 1, 2, FSR_I /* 1.25 * 2 */
/* positive overflow */
- test_ftoi utrunc.s, a2, f0, 0x4effffff, 0, 0x7fffff80
- test_ftoi utrunc.s, a2, f0, 0x4f000000, 0, 0x80000000
- test_ftoi utrunc.s, a2, f0, 0x4effffff, 1, 0xffffff00
- test_ftoi utrunc.s, a2, f0, 0x4f800000, 1, 0xffffffff
+ test_ftoi utrunc.s, a2, f0, 0x4effffff, 0, 0x7fffff80, FSR__
+ test_ftoi utrunc.s, a2, f0, 0x4f000000, 0, 0x80000000, FSR__
+ test_ftoi utrunc.s, a2, f0, 0x4effffff, 1, 0xffffff00, FSR__
+ test_ftoi utrunc.s, a2, f0, 0x4f800000, 1, 0xffffffff, FSR_V
/* +inf */
- test_ftoi utrunc.s, a2, f0, 0x7f800000, 0, 0xffffffff
+ test_ftoi utrunc.s, a2, f0, 0x7f800000, 0, 0xffffffff, FSR_V
/* NaN */
- test_ftoi utrunc.s, a2, f0, 0x7f800001, 0, 0xffffffff
- test_ftoi utrunc.s, a2, f0, 0x7fc00000, 0, 0xffffffff
+ test_ftoi utrunc.s, a2, f0, 0x7f800001, 0, 0xffffffff, FSR_V
+ test_ftoi utrunc.s, a2, f0, 0x7fc00000, 0, 0xffffffff, FSR_V
test_end
test float_s
test_itof float.s, f0, a2, -1, 0, \
- 0xbf800000, 0xbf800000, 0xbf800000, 0xbf800000
- test_itof float.s, f0, a2, 0, 0, 0, 0, 0, 0
+ 0xbf800000, 0xbf800000, 0xbf800000, 0xbf800000, FSR__
+ test_itof float.s, f0, a2, 0, 0, 0, 0, 0, 0, FSR__
test_itof float.s, f0, a2, 1, 1, \
- 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
+ 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, FSR__
test_itof float.s, f0, a2, 1, 0, \
- 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
+ 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, FSR__
test_itof float.s, f0, a2, 0x7fffffff, 0, \
- 0x4f000000, 0x4effffff, 0x4f000000, 0x4effffff
+ 0x4f000000, 0x4effffff, 0x4f000000, 0x4effffff, FSR_I
test_end
test ufloat_s
- test_itof ufloat.s, f0, a2, 0, 0, 0, 0, 0, 0
+ test_itof ufloat.s, f0, a2, 0, 0, 0, 0, 0, 0, FSR__
test_itof ufloat.s, f0, a2, 1, 1, \
- 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
+ 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, FSR__
test_itof ufloat.s, f0, a2, 1, 0, \
- 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
+ 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, FSR__
test_itof ufloat.s, f0, a2, 0x7fffffff, 0, \
- 0x4f000000, 0x4effffff, 0x4f000000, 0x4effffff
+ 0x4f000000, 0x4effffff, 0x4f000000, 0x4effffff, FSR_I
test_itof ufloat.s, f0, a2, 0xffffffff, 0, \
- 0x4f800000, 0x4f7fffff, 0x4f800000, 0x4f7fffff
+ 0x4f800000, 0x4f7fffff, 0x4f800000, 0x4f7fffff, FSR_I
test_end
#endif
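In the test_ftoi cases, the c parameter is the scale immediate of the conversion opcodes: round.s/trunc.s/floor.s/ceil.s/utrunc.s convert value * 2^c, which is why the /* 1.25 * 2 */ style comments appear wherever c is 1. The new trailing argument carries the expected FSR flags, checked only on DFPU (the #if DFPU branch of test_ftoi_ex); FPU2000 keeps asserting a zero FSR. A sketch of one such case inside a test body:

    movfp   f0, 0x3fa00000    /* 1.25f */
    round.s a2, f0, 1         /* round(1.25 * 2^1) = round(2.5) = 2, ties-to-even */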
diff --git a/tests/tcg/xtensa/test_fp0_div.S b/tests/tcg/xtensa/test_fp0_div.S
new file mode 100644
index 0000000..c3e7ad7
--- /dev/null
+++ b/tests/tcg/xtensa/test_fp0_div.S
@@ -0,0 +1,82 @@
+#include "macros.inc"
+#include "fpu.h"
+
+test_suite fp0_div
+
+#if XCHAL_HAVE_FP_DIV
+
+.macro divs_seq q, a, b, r, y, y0, an, bn, e, ex
+ div0.s \y0, \b
+ nexp01.s \bn, \b
+ const.s \e, 1
+ maddn.s \e, \bn, \y0
+ mov.s \y, \y0
+ mov.s \ex, \b
+ nexp01.s \an, \a
+ maddn.s \y, \e, \y0
+ const.s \e, 1
+ const.s \q, 0
+ neg.s \r, \an
+ maddn.s \e, \bn, \y
+ maddn.s \q, \r, \y0
+ mkdadj.s \ex, \a
+ maddn.s \y, \e, \y
+ maddn.s \r, \bn, \q
+ const.s \e, 1
+ maddn.s \e, \bn, \y
+ maddn.s \q, \r, \y
+ neg.s \r, \an
+ maddn.s \y, \e, \y
+ maddn.s \r, \bn, \q
+ addexpm.s \q, \ex
+ addexp.s \y, \ex
+ divn.s \q, \r, \y
+.endm
+
+.macro div_s fr0, fr1, fr2
+ divs_seq \fr0, \fr1, \fr2, f9, f10, f11, f12, f13, f14, f15
+.endm
+
+.macro movfp fr, v
+ movi a2, \v
+ wfr \fr, a2
+.endm
+
+.macro check_res fr, r, sr
+ rfr a2, \fr
+ dump a2
+ movi a3, \r
+ assert eq, a2, a3
+ rur a2, fsr
+ movi a3, \sr
+ assert eq, a2, a3
+.endm
+
+test div_s
+ movi a2, 1
+ wsr a2, cpenable
+
+ test_op2 div_s, f0, f1, f2, 0x40000000, 0x40400000, \
+ 0x3f2aaaab, 0x3f2aaaaa, 0x3f2aaaab, 0x3f2aaaaa, \
+ FSR_I, FSR_I, FSR_I, FSR_I
+ test_op2 div_s, f3, f4, f5, F32_1, F32_0, \
+ F32_PINF, F32_PINF, F32_PINF, F32_PINF, \
+ FSR_Z, FSR_Z, FSR_Z, FSR_Z
+ test_op2 div_s, f6, f7, f8, F32_0, F32_0, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+
+ /* MAX_FLOAT / 0.5 = +inf/MAX_FLOAT */
+ test_op2 div_s, f0, f1, f2, F32_MAX, F32_0_5, \
+ F32_PINF, F32_MAX, F32_PINF, F32_MAX, \
+ FSR_OI, FSR_OI, FSR_OI, FSR_OI
+
+ /* 0.5 / MAX_FLOAT = denorm */
+ test_op2 div_s, f0, f1, f2, F32_0_5, F32_MAX, \
+ 0x00100000, 0x00100000, 0x00100001, 0x00100000, \
+ FSR_UI, FSR_UI, FSR_UI, FSR_UI
+test_end
+
+#endif
+
+test_suite_end
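divs_seq above is the multi-instruction division sequence the FP_DIV option provides in place of a single divide opcode: div0.s produces a reciprocal seed that the const.s/maddn.s steps iteratively refine (Newton-Raphson style), while mkdadj.s, addexpm.s, addexp.s and divn.s handle exponent adjustment and the final quotient correction. The div_s wrapper makes it usable like a plain instruction, at the cost of clobbering f9-f15 as scratch. Usage sketch:

    movfp   f1, 0x40000000         /* 2.0f */
    movfp   f2, 0x40400000         /* 3.0f */
    div_s   f0, f1, f2             /* f0 = 2/3 = 0x3f2aaaab (nearest), FSR_I */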
diff --git a/tests/tcg/xtensa/test_fp0_sqrt.S b/tests/tcg/xtensa/test_fp0_sqrt.S
new file mode 100644
index 0000000..585973d
--- /dev/null
+++ b/tests/tcg/xtensa/test_fp0_sqrt.S
@@ -0,0 +1,76 @@
+#include "macros.inc"
+#include "fpu.h"
+
+test_suite fp0_sqrt
+
+#if XCHAL_HAVE_FP_SQRT
+
+.macro sqrt_seq r, a, y, t1, hn, h2, t5, h
+ sqrt0.s \y, \a
+ const.s \t1, 0
+ maddn.s \t1, \y, \y
+ nexp01.s \hn, \a
+ const.s \r, 3
+ addexp.s \hn, \r
+ maddn.s \r, \t1, \hn
+ nexp01.s \t1, \a
+ neg.s \h2, \t1
+ maddn.s \y, \r, \y
+ const.s \r, 0
+ const.s \t5, 0
+ const.s \h, 0
+ maddn.s \r, \h2, \y
+ maddn.s \t5, \y, \hn
+ const.s \hn, 3
+ maddn.s \h, \hn, \y
+ maddn.s \t1, \r, \r
+ maddn.s \hn, \t5, \y
+ neg.s \y, \h
+ maddn.s \r, \t1, \y
+ maddn.s \h, \hn, \h
+ mksadj.s \y, \a
+ nexp01.s \a, \a
+ maddn.s \a, \r, \r
+ neg.s \t1, \h
+ addexpm.s \r, \y
+ addexp.s \t1, \y
+ divn.s \r, \a, \t1
+.endm
+
+.macro sqrt_s fr0, fr1
+ sqrt_seq \fr0, \fr1, f10, f11, f12, f13, f14, f15
+.endm
+
+.macro movfp fr, v
+ movi a2, \v
+ wfr \fr, a2
+.endm
+
+.macro check_res fr, r, sr
+ rfr a2, \fr
+ dump a2
+ movi a3, \r
+ assert eq, a2, a3
+ rur a2, fsr
+ movi a3, \sr
+ assert eq, a2, a3
+.endm
+
+test sqrt_s
+ movi a2, 1
+ wsr a2, cpenable
+
+ test_op1 sqrt_s, f0, f1, 0x40000000, \
+ 0x3fb504f3, 0x3fb504f3, 0x3fb504f4, 0x3fb504f3, \
+ FSR_I, FSR_I, FSR_I, FSR_I
+ test_op1 sqrt_s, f3, f4, F32_1, \
+ F32_1, F32_1, F32_1, F32_1, \
+ FSR__, FSR__, FSR__, FSR__
+ test_op1 sqrt_s, f6, f7, F32_MINUS | F32_1, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+
+#endif
+
+test_suite_end
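sqrt_seq is the matching square-root sequence for the FP_SQRT option, built from a sqrt0.s seed plus maddn.s refinement and a final divn.s; the sqrt_s wrapper clobbers f10-f15 as scratch. Usage sketch:

    movfp   f1, 0x40000000         /* 2.0f */
    sqrt_s  f0, f1                 /* f0 = sqrt(2) = 0x3fb504f3 (nearest), FSR_I */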
diff --git a/tests/tcg/xtensa/test_fp1.S b/tests/tcg/xtensa/test_fp1.S
index 6e182e5..77336a3 100644
--- a/tests/tcg/xtensa/test_fp1.S
+++ b/tests/tcg/xtensa/test_fp1.S
@@ -1,4 +1,5 @@
#include "macros.inc"
+#include "fpu.h"
test_suite fp1
@@ -9,7 +10,7 @@ test_suite fp1
wfr \fr, a2
.endm
-.macro test_ord_ex op, br, fr0, fr1, v0, v1, r
+.macro test_ord_ex op, br, fr0, fr1, v0, v1, r, sr
movi a2, 0
wur a2, fsr
movfp \fr0, \v0
@@ -20,65 +21,70 @@ test_suite fp1
movt a2, a3, \br
assert eqi, a2, \r
rur a2, fsr
+#if DFPU
+ movi a3, \sr
+ assert eq, a2, a3
+#else
assert eqi, a2, 0
+#endif
.endm
-.macro test_ord op, br, fr0, fr1, v0, v1, r
+.macro test_ord op, br, fr0, fr1, v0, v1, r, sr
movi a2, 0
wur a2, fcr
- test_ord_ex \op, \br, \fr0, \fr1, \v0, \v1, \r
+ test_ord_ex \op, \br, \fr0, \fr1, \v0, \v1, \r, \sr
movi a2, 0x7c
wur a2, fcr
- test_ord_ex \op, \br, \fr0, \fr1, \v0, \v1, \r
+ test_ord_ex \op, \br, \fr0, \fr1, \v0, \v1, \r, \sr
.endm
-.macro test_ord_all op, aa, ab, ba, aPI, PIa, aN, Na, II, IN, NI
- test_ord \op b0, f0, f1, 0x3f800000, 0x3f800000, \aa
- test_ord \op b1, f2, f3, 0x3f800000, 0x3fc00000, \ab
- test_ord \op b2, f4, f5, 0x3fc00000, 0x3f800000, \ba
- test_ord \op b3, f6, f7, 0x3f800000, 0x7f800000, \aPI
- test_ord \op b4, f8, f9, 0x7f800000, 0x3f800000, \PIa
- test_ord \op b5, f10, f11, 0x3f800000, 0xffc00001, \aN
- test_ord \op b6, f12, f13, 0x3f800000, 0xff800001, \aN
- test_ord \op b7, f14, f15, 0x3f800000, 0x7f800001, \aN
- test_ord \op b8, f0, f1, 0x3f800000, 0x7fc00000, \aN
- test_ord \op b9, f2, f3, 0xffc00001, 0x3f800000, \Na
- test_ord \op b10, f4, f5, 0xff800001, 0x3f800000, \Na
- test_ord \op b11, f6, f7, 0x7f800001, 0x3f800000, \Na
- test_ord \op b12, f8, f9, 0x7fc00000, 0x3f800000, \Na
- test_ord \op b13, f10, f11, 0x7f800000, 0x7f800000, \II
- test_ord \op b14, f12, f13, 0x7f800000, 0x7fc00000, \IN
- test_ord \op b15, f14, f15, 0x7fc00000, 0x7f800000, \NI
+.macro test_ord_all op, aa, ab, ba, aPI, PIa, aN, Na, II, IN, NI, qnan_sr
+ test_ord \op b0, f0, f1, 0x3f800000, 0x3f800000, \aa, FSR__ /* ord == ord */
+ test_ord \op b1, f2, f3, 0x3f800000, 0x3fc00000, \ab, FSR__ /* ord < ord */
+ test_ord \op b2, f4, f5, 0x3fc00000, 0x3f800000, \ba, FSR__ /* ord > ord */
+ test_ord \op b3, f6, f7, 0x3f800000, 0x7f800000, \aPI, FSR__ /* ord +INF */
+ test_ord \op b4, f8, f9, 0x7f800000, 0x3f800000, \PIa, FSR__ /* +INF ord */
+ test_ord \op b5, f10, f11, 0x3f800000, 0xffc00001, \aN, \qnan_sr /* ord -QNaN */
+ test_ord \op b6, f12, f13, 0x3f800000, 0xff800001, \aN, FSR_V /* ord -SNaN */
+ test_ord \op b7, f14, f15, 0x3f800000, 0x7f800001, \aN, FSR_V /* ord +SNaN */
+ test_ord \op b8, f0, f1, 0x3f800000, 0x7fc00000, \aN, \qnan_sr /* ord +QNaN */
+ test_ord \op b9, f2, f3, 0xffc00001, 0x3f800000, \Na, \qnan_sr /* -QNaN ord */
+ test_ord \op b10, f4, f5, 0xff800001, 0x3f800000, \Na, FSR_V /* -SNaN ord */
+ test_ord \op b11, f6, f7, 0x7f800001, 0x3f800000, \Na, FSR_V /* +SNaN ord */
+ test_ord \op b12, f8, f9, 0x7fc00000, 0x3f800000, \Na, \qnan_sr /* +QNaN ord */
+ test_ord \op b13, f10, f11, 0x7f800000, 0x7f800000, \II, FSR__ /* +INF +INF */
+ test_ord \op b14, f12, f13, 0x7f800000, 0x7fc00000, \IN, \qnan_sr /* +INF +QNaN */
+ test_ord \op b15, f14, f15, 0x7fc00000, 0x7f800000, \NI, \qnan_sr /* +QNaN +INF */
.endm
test un_s
movi a2, 1
wsr a2, cpenable
- test_ord_all un.s, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1
+ test_ord_all un.s, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, FSR__
test_end
test oeq_s
- test_ord_all oeq.s, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0
+ test_ord_all oeq.s, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, FSR__
test_end
test ueq_s
- test_ord_all ueq.s, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1
+ test_ord_all ueq.s, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, FSR__
test_end
test olt_s
- test_ord_all olt.s, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0
+ test_ord_all olt.s, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, FSR_V
test_end
test ult_s
- test_ord_all ult.s, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1
+ test_ord_all ult.s, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, FSR__
test_end
test ole_s
- test_ord_all ole.s, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0
+ test_ord_all ole.s, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, FSR_V
test_end
test ule_s
- test_ord_all ule.s, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1
+ test_ord_all ule.s, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, FSR__
test_end
.macro test_cond op, fr0, fr1, cr, v0, v1, r
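The new qnan_sr column in test_ord_all encodes which comparisons signal on quiet NaNs: olt.s and ole.s raise Invalid for any NaN operand, while un.s, oeq.s, ueq.s, ult.s and ule.s raise it only for signaling NaNs, matching IEEE 754's signaling vs. quiet comparison predicates. On FPU2000 configurations the #else branch of test_ord_ex still asserts that FSR stays zero throughout.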
diff --git a/tests/tcg/xtensa/test_lsc.S b/tests/tcg/xtensa/test_lsc.S
index 0578bf1..348822b 100644
--- a/tests/tcg/xtensa/test_lsc.S
+++ b/tests/tcg/xtensa/test_lsc.S
@@ -1,4 +1,5 @@
#include "macros.inc"
+#include "fpu.h"
test_suite lsc
@@ -9,9 +10,14 @@ test lsi
wsr a2, cpenable
movi a2, 1f
- lsi f0, a2, 0
lsi f1, a2, 4
+#if DFPU
+ lsi f2, a2, 8
+ lsip f0, a2, 8
+#else
+ lsi f0, a2, 0
lsiu f2, a2, 8
+#endif
movi a3, 1f + 8
assert eq, a2, a3
rfr a2, f0
@@ -34,13 +40,18 @@ test ssi
movi a2, 1f
movi a3, 0x40800000
wfr f3, a3
- ssi f3, a2, 0
movi a3, 0x40a00000
wfr f4, a3
- ssi f4, a2, 4
movi a3, 0x40c00000
wfr f5, a3
+ ssi f4, a2, 4
+#if DFPU
+ ssi f5, a2, 8
+ ssip f3, a2, 8
+#else
+ ssi f3, a2, 0
ssiu f5, a2, 8
+#endif
movi a3, 1f + 8
assert eq, a2, a3
l32i a4, a2, -8
@@ -62,11 +73,16 @@ test_end
test lsx
movi a2, 1f
movi a3, 0
+ movi a4, 4
+ movi a5, 8
+ lsx f7, a2, a4
+#if DFPU
+ lsx f8, a2, a5
+ lsxp f6, a2, a5
+#else
lsx f6, a2, a3
- movi a3, 4
- lsx f7, a2, a3
- movi a3, 8
- lsxu f8, a2, a3
+ lsxu f8, a2, a5
+#endif
movi a3, 1f + 8
assert eq, a2, a3
rfr a2, f6
@@ -87,18 +103,23 @@ test_end
test ssx
movi a2, 1f
- movi a3, 0
movi a4, 0x41200000
wfr f9, a4
- ssx f9, a2, a3
- movi a3, 4
movi a4, 0x41300000
wfr f10, a4
- ssx f10, a2, a3
- movi a3, 8
movi a4, 0x41400000
wfr f11, a4
- ssxu f11, a2, a3
+ movi a3, 0
+ movi a4, 4
+ movi a5, 8
+ ssx f10, a2, a4
+#if DFPU
+ ssx f11, a2, a5
+ ssxp f9, a2, a5
+#else
+ ssx f9, a2, a3
+ ssxu f11, a2, a5
+#endif
movi a3, 1f + 8
assert eq, a2, a3
l32i a4, a2, -8
@@ -119,4 +140,127 @@ test_end
#endif
+#if XCHAL_HAVE_DFP
+
+#if XCHAL_HAVE_BE
+#define F64_HIGH_OFF 0
+#else
+#define F64_HIGH_OFF 4
+#endif
+
+.macro movdf fr, hi, lo
+ movi a2, \hi
+ movi a3, \lo
+ wfrd \fr, a2, a3
+.endm
+
+test ldi
+ movi a2, 1
+ wsr a2, cpenable
+
+ movi a2, 1f
+ ldi f1, a2, 8
+ ldi f2, a2, 16
+ ldip f0, a2, 16
+ movi a3, 1f + 16
+ assert eq, a2, a3
+ rfrd a2, f0
+ movi a3, 0x3ff00000
+ assert eq, a2, a3
+ rfrd a2, f1
+ movi a3, 0x40000000
+ assert eq, a2, a3
+ rfrd a2, f2
+ movi a3, 0x40080000
+ assert eq, a2, a3
+.data
+ .align 8
+1:
+.double 1, 2, 3
+.text
+test_end
+
+test sdi
+ movdf f3, 0x40800000, 0
+ movdf f4, 0x40a00000, 0
+ movdf f5, 0x40c00000, 0
+ movi a2, 1f
+ sdi f4, a2, 8
+ sdi f5, a2, 16
+ sdip f3, a2, 16
+ movi a3, 1f + 16
+ assert eq, a2, a3
+ l32i a4, a2, -16 + F64_HIGH_OFF
+ movi a3, 0x40800000
+ assert eq, a4, a3
+ l32i a4, a2, -8 + F64_HIGH_OFF
+ movi a3, 0x40a00000
+ assert eq, a4, a3
+ l32i a4, a2, F64_HIGH_OFF
+ movi a3, 0x40c00000
+ assert eq, a4, a3
+.data
+ .align 8
+1:
+.double 0, 0, 0
+.text
+test_end
+
+test ldx
+ movi a2, 1f
+ movi a3, 0
+ movi a4, 8
+ movi a5, 16
+ ldx f7, a2, a4
+ ldx f8, a2, a5
+ ldxp f6, a2, a5
+ movi a3, 1f + 16
+ assert eq, a2, a3
+ rfrd a2, f6
+ movi a3, 0x401c0000
+ assert eq, a2, a3
+ rfrd a2, f7
+ movi a3, 0x40200000
+ assert eq, a2, a3
+ rfrd a2, f8
+ movi a3, 0x40220000
+ assert eq, a2, a3
+.data
+ .align 8
+1:
+.double 7, 8, 9
+.text
+test_end
+
+test sdx
+ movdf f9, 0x41200000, 0
+ movdf f10, 0x41300000, 0
+ movdf f11, 0x41400000, 0
+ movi a2, 1f
+ movi a3, 0
+ movi a4, 8
+ movi a5, 16
+ sdx f10, a2, a4
+ sdx f11, a2, a5
+ sdxp f9, a2, a5
+ movi a3, 1f + 16
+ assert eq, a2, a3
+ l32i a4, a2, -16 + F64_HIGH_OFF
+ movi a3, 0x41200000
+ assert eq, a4, a3
+ l32i a4, a2, -8 + F64_HIGH_OFF
+ movi a3, 0x41300000
+ assert eq, a4, a3
+ l32i a4, a2, F64_HIGH_OFF
+ movi a3, 0x41400000
+ assert eq, a4, a3
+.data
+ .align 8
+1:
+.double 0, 0, 0
+.text
+test_end
+
+#endif
+
test_suite_end
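In test_lsc, the FPU2000 update-form loads and stores (lsiu/ssiu/lsxu/ssxu) are replaced on DFPU configurations by post-increment forms (lsip/ssip/lsxp/ssxp), and the accesses are reordered so both variants leave identical values at identical addresses: the update forms access base+offset and write the incremented address back, whereas the post-increment forms access the unmodified base first and add the offset afterwards. A sketch of the difference (buf is a hypothetical label, not from the patch):

    movi    a2, buf
#if DFPU
    lsip    f0, a2, 8       /* f0 = *(buf + 0), then a2 = buf + 8 */
#else
    lsiu    f0, a2, 8       /* a2 = buf + 8, then f0 = *(buf + 8) */
#endif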