author	Max Filippov <jcmvbkbc@gmail.com>	2020-07-04 10:16:22 -0700
committer	Max Filippov <jcmvbkbc@gmail.com>	2020-08-21 12:48:15 -0700
commit	ac81ff227d264b29b8b75068a75d047e2d736d6f (patch)
tree	561a678804d35f4da36e13782664f60150f6d4e6 /tests
parent	e95ef43181b49a401d163729d0d96f304525dfe7 (diff)
tests/tcg/xtensa: expand madd tests
Test that madd doesn't do rounding after multiplication. Test NaN
propagation rules for FPU2000 and DFPU madd opcode.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
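As context for the precision case below (not part of the patch): a minimal
host-side C sketch of what it checks. madd.s is expected to behave like a
fused multiply-add, keeping the exact product instead of rounding it before
the addition. The bit patterns are the ones used in the test; the f32/u32
helpers are invented here for illustration.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* bit-pattern <-> float helpers, assuming 32-bit IEEE single */
    static float f32(uint32_t u) { float f; memcpy(&f, &u, 4); return f; }
    static uint32_t u32(float f) { uint32_t u; memcpy(&u, &f, 4); return u; }

    int main(void)
    {
        float acc = f32(0xbf800002); /* -(1 + 2^-22) */
        float m   = f32(0x3f800001); /* 1 + 2^-23; exact m*m = 1 + 2^-22 + 2^-46 */

        /* Fused: the exact product survives, acc + m*m = 2^-46. */
        printf("fused: 0x%08x\n", (unsigned)u32(fmaf(m, m, acc))); /* 0x28800000 */

        /* Split: m*m first rounds to 1 + 2^-22, so the sum is 0.
         * (Compile with -ffp-contract=off so this stays a separate multiply.) */
        printf("split: 0x%08x\n", (unsigned)u32(m * m + acc));     /* 0x00000000 */
        return 0;
    }

If the product were rounded before the addition, the madd_s_precision case
would see 0 instead of 0x28800000.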
Diffstat (limited to 'tests')
-rw-r--r--	tests/tcg/xtensa/test_fp0_arith.S	104
1 file changed, 104 insertions, 0 deletions
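The NaN-propagation cases in the hunk below encode an operand priority that
differs between the two FPU flavours. A hypothetical helper (pick_nan_madd
is invented here, not QEMU code) restating the expected pick order, with c
the accumulator (NaN1) and a, b the multiplicands (NaN2, NaN3):

    #include <math.h>
    #include <stdbool.h>

    /* Assumes at least one operand is a NaN; this only summarizes the
     * expected test results, the real logic lives in QEMU's xtensa
     * FPU/softfloat code. */
    static float pick_nan_madd(float c, float a, float b, bool dfpu)
    {
        if (dfpu) {
            /* DFPU priority: NaN1, NaN3, NaN2 */
            if (isnan(c)) return c;
            if (isnan(b)) return b;
            return a;
        }
        /* FPU2000 priority: NaN2, NaN3, NaN1 */
        if (isnan(a)) return a;
        if (isnan(b)) return b;
        return c;
    }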
diff --git a/tests/tcg/xtensa/test_fp0_arith.S b/tests/tcg/xtensa/test_fp0_arith.S
index df870eb..7eefc1d 100644
--- a/tests/tcg/xtensa/test_fp0_arith.S
+++ b/tests/tcg/xtensa/test_fp0_arith.S
@@ -146,6 +146,110 @@ test madd_s
FSR_I, FSR_I, FSR_I, FSR_I
test_end
+test madd_s_precision
+ test_op3 madd.s, f0, f1, f2, f0, 0xbf800002, 0x3f800001, 0x3f800001, \
+ 0x28800000, 0x28800000, 0x28800000, 0x28800000, \
+ FSR__, FSR__, FSR__, FSR__
+test_end
+
+#if DFPU
+test madd_s_nan_dfpu
+ /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* inf * 0 = default NaN */
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + SNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ /* inf * 0 + QNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+
+ /* madd/msub SNaN turns to QNaN and sets Invalid flag */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR_V, FSR_V, FSR_V, FSR_V
+test_end
+#else
+test madd_s_nan_fpu2k
+ /* FPU2000 madd/msub NaN1, NaN2, NaN3 priority: NaN2, NaN3, NaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
+ F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
+ F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* inf * 0 = default NaN */
+ test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
+ F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
+ FSR__, FSR__, FSR__, FSR__
+ /* inf * 0 + SNaN1 = SNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
+ F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ /* inf * 0 + QNaN1 = QNaN1 */
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
+ F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+
+ /* madd/msub SNaN is preserved */
+ test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
+ F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
+ FSR__, FSR__, FSR__, FSR__
+ test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
+ F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), \
+ FSR__, FSR__, FSR__, FSR__
+test_end
+#endif
+
test msub_s
test_op3 msub.s, f0, f1, f2, f0, 0x3f800000, 0x3f800001, 0x3f800001, \
0xb4800000, 0xb4800000, 0xb4800000, 0xb4800001, \
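Beyond pick order, the two pipelines also differ on signaling NaNs, as the
FSR_V vs FSR__ columns above show: DFPU quiets an SNaN operand and raises
Invalid, while FPU2000 forwards it bit-exactly with no flag. A rough
bit-level sketch of just that difference (helper names invented; the DFPU
Invalid raised by inf * 0 itself is not modelled here):

    #include <stdbool.h>
    #include <stdint.h>

    #define F32_QUIET_BIT 0x00400000u

    static bool is_nan_bits(uint32_t u)
    {
        return (u & 0x7f800000u) == 0x7f800000u && (u & 0x007fffffu) != 0;
    }

    /* What a NaN operand turns into, and whether Invalid (FSR_V) is set.
     * Raw bit patterns are used so a signaling NaN survives untouched. */
    static uint32_t madd_nan_result(uint32_t nan, bool dfpu, bool *invalid)
    {
        bool snan = is_nan_bits(nan) && !(nan & F32_QUIET_BIT);

        if (snan && dfpu) {
            /* DFPU: an SNaN raises Invalid and comes out quieted,
             * e.g. F32_SNAN(1) -> F32_QNAN(1). */
            *invalid = true;
            return nan | F32_QUIET_BIT;
        }
        /* FPU2000 preserves even an SNaN and raises nothing;
         * quiet NaNs set no flags on either pipeline. */
        *invalid = false;
        return nan;
    }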