author     Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2021-01-19 15:36:55 +0000
committer  Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2021-01-19 17:25:42 +0000
commit     763b865a17d32ff1d19720e333999de25132d3d4 (patch)
tree       bd87505e1687b9d8bfcf8892f1c65d9bfc33911b
parent     8227106f5668c8fb1f0c5d2026e44cc0b84ee991 (diff)
aarch64: Remove testing of saturation cumulative QC bit
Since we don't guarantee the ordering of the QC flag in FPSR in the
saturation intrinsics, we shouldn't be testing for it.  I want to relax
the flags for some of the builtins to enable more optimisation, but that
triggers the QC flag tests in advsimd-intrinsics.exp.  We don't implement
the saturation flag access intrinsics on aarch64 anyway, and we don't
want to.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
	(CHECK_CUMULATIVE_SAT): Delete.
	(CHECK_CUMULATIVE_SAT_NAMED): Likewise.  Delete related variables.
	* gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc: Remove
	uses of the above.
	* gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqabs.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqadd.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmull.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqmovn.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqmovun.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqneg.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrshl.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqshl.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c: Likewise.
	* gcc.target/aarch64/advsimd-intrinsics/vqsub.c: Likewise.
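For reference, the cumulative saturation (QC) flag that the deleted
CHECK_CUMULATIVE_SAT machinery inspected lives in bit 27 of the AArch64
FPSR system register.  Below is a minimal, illustrative sketch of reading
and clearing that bit directly; the helper names are not part of the
testsuite, and GCC does not order such accesses with respect to the
saturating intrinsics, which is why checks built on them are unreliable.

    /* Illustrative sketch, not part of the testsuite: read and clear the
       QC (cumulative saturation) bit, bit 27 of FPSR on AArch64.  GCC
       does not guarantee the ordering of these accesses relative to the
       saturating intrinsics, so values observed this way are unreliable.  */
    #include <stdint.h>

    static inline int
    read_qc_flag (void)
    {
      uint64_t fpsr;
      __asm__ volatile ("mrs %0, fpsr" : "=r" (fpsr));
      return (fpsr >> 27) & 1;
    }

    static inline void
    clear_qc_flag (void)
    {
      uint64_t fpsr;
      __asm__ volatile ("mrs %0, fpsr" : "=r" (fpsr));
      fpsr &= ~((uint64_t) 1 << 27);
      __asm__ volatile ("msr fpsr, %0" : : "r" (fpsr));
    }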
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h | 67
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc | 43
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc | 21
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqabs.c | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqadd.c | 91
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc | 17
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc | 21
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc | 17
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c | 9
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c | 14
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c | 9
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c | 9
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c | 14
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c | 9
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c | 42
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull.c | 30
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c | 30
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c | 49
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c | 31
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqneg.c | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc | 42
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc | 45
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c | 20
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c | 20
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c | 20
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c | 20
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c | 57
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c | 65
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c | 65
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c | 521
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c | 74
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c | 72
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c | 376
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c | 117
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c | 118
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c | 75
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c | 46
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqsub.c | 92
41 files changed, 753 insertions, 1786 deletions
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
index 61fe7e7..6f4d62b 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
@@ -356,73 +356,6 @@ static volatile int __read_neon_cumulative_sat (void) {
}
#endif
-/* Declare expected cumulative saturation results, one for each
- size. They are defined and initialized in relevant test files. */
-extern int VECT_VAR(expected_cumulative_sat, int, 8, 8);
-extern int VECT_VAR(expected_cumulative_sat, int, 16, 4);
-extern int VECT_VAR(expected_cumulative_sat, int, 32, 2);
-extern int VECT_VAR(expected_cumulative_sat, int, 64, 1);
-extern int VECT_VAR(expected_cumulative_sat, uint, 8, 8);
-extern int VECT_VAR(expected_cumulative_sat, uint, 16, 4);
-extern int VECT_VAR(expected_cumulative_sat, uint, 32, 2);
-extern int VECT_VAR(expected_cumulative_sat, uint, 64, 1);
-extern int VECT_VAR(expected_cumulative_sat, int, 8, 16);
-extern int VECT_VAR(expected_cumulative_sat, int, 16, 8);
-extern int VECT_VAR(expected_cumulative_sat, int, 32, 4);
-extern int VECT_VAR(expected_cumulative_sat, int, 64, 2);
-extern int VECT_VAR(expected_cumulative_sat, uint, 8, 16);
-extern int VECT_VAR(expected_cumulative_sat, uint, 16, 8);
-extern int VECT_VAR(expected_cumulative_sat, uint, 32, 4);
-extern int VECT_VAR(expected_cumulative_sat, uint, 64, 2);
-
-/* Check cumulative saturation flag vs expected value. */
-#define CHECK_CUMULATIVE_SAT(MSG,T,W,N,EXPECTED,COMMENT) \
- { \
- if (Neon_Cumulative_Sat != \
- VECT_VAR(EXPECTED, T, W, N)) { \
- fprintf(stderr, \
- "ERROR in %s (%s line %d in cumulative_sat '%s') at type %s: " \
- "got %d expected %d%s\n", \
- MSG, __FILE__, __LINE__, \
- STR(EXPECTED), \
- STR(VECT_NAME(T, W, N)), \
- Neon_Cumulative_Sat, \
- VECT_VAR(EXPECTED, T, W, N), \
- strlen(COMMENT) > 0 ? " " COMMENT : ""); \
- abort(); \
- } \
- fprintf(stderr, "CHECKED CUMULATIVE SAT %s %s\n", \
- STR(VECT_TYPE(T, W, N)), MSG); \
- }
-
-#define CHECK_CUMULATIVE_SAT_NAMED(test_name,EXPECTED,comment) \
- { \
- CHECK_CUMULATIVE_SAT(test_name, int, 8, 8, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 16, 4, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 32, 2, PRIx32, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 64, 1, PRIx64, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 8, 8, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 16, 4, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 32, 2, PRIx32, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 64, 1, PRIx64, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, poly, 8, 8, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, poly, 16, 4, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT_FP(test_name, float, 32, 2, PRIx32, EXPECTED, comment); \
- \
- CHECK_CUMULATIVE_SAT(test_name, int, 8, 16, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 16, 8, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 32, 4, PRIx32, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, int, 64, 2, PRIx64, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 8, 16, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 16, 8, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 32, 4, PRIx32, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, uint, 64, 2, PRIx64, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, poly, 8, 16, PRIx8, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT(test_name, poly, 16, 8, PRIx16, EXPECTED, comment); \
- CHECK_CUMULATIVE_SAT_FP(test_name, float, 32, 4, PRIx32, EXPECTED, comment); \
- } \
-
-
/* Clean output buffers before execution. */
static void clean_results (void)
{
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc
index c91709f..58bd9df 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_sat_op.inc
@@ -17,17 +17,16 @@ void FNNAME (INSN_NAME) (void)
{
/* vector_res = OP(vector1,vector2), then store the result. */
-#define TEST_BINARY_SAT_OP1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_BINARY_SAT_OP1(INSN, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector1, T1, W, N), \
VECT_VAR(vector2, T1, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_BINARY_SAT_OP(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_BINARY_SAT_OP1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_BINARY_SAT_OP(INSN, Q, T1, T2, W, N, CMT) \
+ TEST_BINARY_SAT_OP1(INSN, Q, T1, T2, W, N, CMT)
DECL_VARIABLE_ALL_VARIANTS(vector1);
DECL_VARIABLE_ALL_VARIANTS(vector2);
@@ -58,23 +57,23 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, q, uint, u, 64, 2, 0x88);
/* Apply a saturating binary operator named INSN_NAME. */
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat, "");
-
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, expected_cumulative_sat, "");
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, "");
+
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, "");
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, "");
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, "");
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, "");
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc
index 30df2dd..e9f6809 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/unary_sat_op.inc
@@ -16,16 +16,15 @@
void FNNAME (INSN_NAME) (void)
{
/* y=OP(x), then store the result. */
-#define TEST_UNARY_SAT_OP1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_UNARY_SAT_OP1(INSN, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_UNARY_SAT_OP(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_UNARY_SAT_OP1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_UNARY_SAT_OP(INSN, Q, T1, T2, W, N, CMT) \
+ TEST_UNARY_SAT_OP1(INSN, Q, T1, T2, W, N, CMT)
/* No need for 64 bits variants. */
DECL_VARIABLE(vector, int, 8, 8);
@@ -53,12 +52,12 @@ void FNNAME (INSN_NAME) (void)
VLOAD(vector, buffer, q, int, s, 32, 4);
/* Apply a saturating unary operator named INSN_NAME. */
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_cumulative_sat, "");
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_cumulative_sat, "");
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_cumulative_sat, "");
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_cumulative_sat, "");
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_cumulative_sat, "");
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_cumulative_sat, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, "");
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, "");
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, "");
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, "");
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqabs.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqabs.c
index 03d25c4..5b86cf1 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqabs.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqabs.c
@@ -19,14 +19,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
0xc, 0xb, 0xa, 0x9 };
VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results when input is the min negative value of the type. */
VECT_VAR_DECL(expected_min_neg,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -44,15 +36,6 @@ VECT_VAR_DECL(expected_min_neg,int,16,8) [] = { 0x7fff, 0x7fff,
VECT_VAR_DECL(expected_min_neg,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
-/* Expected values of cumulative_saturation flag when input is the min
- negative value of the type. */
-int VECT_VAR(expected_cumulative_sat_min_neg,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,32,4) = 1;
-
void vqabs_extra()
{
/* No need for 64 bits variants. */
@@ -82,12 +65,12 @@ void vqabs_extra()
VDUP(vector, q, int, s, 32, 4, 0x80000000);
#define MSG "min negative input"
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_cumulative_sat_min_neg, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, MSG);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_min_neg, MSG);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_min_neg, MSG);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqadd.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqadd.c
index eaa6e82..31f98d6 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqadd.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqadd.c
@@ -11,23 +11,6 @@ void vqadd_extras(void);
#include "binary_sat_op.inc"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0x1, 0x2, 0x3, 0x4,
0x5, 0x6, 0x7, 0x8 };
@@ -60,10 +43,6 @@ VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
/* 64-bits types, with 0 as second input. */
-int VECT_VAR(expected_cumulative_sat_64,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_64,uint,64,2) = 0;
VECT_VAR_DECL(expected_64,int,64,1) [] = { 0xfffffffffffffff0 };
VECT_VAR_DECL(expected_64,uint,64,1) [] = { 0xfffffffffffffff0 };
VECT_VAR_DECL(expected_64,int,64,2) [] = { 0xfffffffffffffff0,
@@ -72,10 +51,6 @@ VECT_VAR_DECL(expected_64,uint,64,2) [] = { 0xfffffffffffffff0,
0xfffffffffffffff1 };
/* 64-bits types, some cases causing cumulative saturation. */
-int VECT_VAR(expected_cumulative_sat_64_2,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64_2,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64_2,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_64_2,uint,64,2) = 1;
VECT_VAR_DECL(expected_64_2,int,64,1) [] = { 0x34 };
VECT_VAR_DECL(expected_64_2,uint,64,1) [] = { 0xffffffffffffffff };
VECT_VAR_DECL(expected_64_2,int,64,2) [] = { 0x34, 0x35 };
@@ -83,10 +58,6 @@ VECT_VAR_DECL(expected_64_2,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
/* 64-bits types, all causing cumulative saturation. */
-int VECT_VAR(expected_cumulative_sat_64_3,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,uint,64,2) = 1;
VECT_VAR_DECL(expected_64_3,int,64,1) [] = { 0x8000000000000000 };
VECT_VAR_DECL(expected_64_3,uint,64,1) [] = { 0xffffffffffffffff };
VECT_VAR_DECL(expected_64_3,int,64,2) [] = { 0x7fffffffffffffff,
@@ -94,13 +65,6 @@ VECT_VAR_DECL(expected_64_3,int,64,2) [] = { 0x7fffffffffffffff,
VECT_VAR_DECL(expected_64_3,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* smaller types, corner cases causing cumulative saturation. (1) */
-int VECT_VAR(expected_csat_lt_64_1,int,8,8) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,16,4) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,32,2) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,8,16) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,16,8) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,32,4) = 1;
VECT_VAR_DECL(expected_lt_64_1,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80 };
VECT_VAR_DECL(expected_lt_64_1,int,16,4) [] = { 0x8000, 0x8000,
@@ -117,13 +81,6 @@ VECT_VAR_DECL(expected_lt_64_1,int,16,8) [] = { 0x8000, 0x8000,
VECT_VAR_DECL(expected_lt_64_1,int,32,4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
-/* smaller types, corner cases causing cumulative saturation. (2) */
-int VECT_VAR(expected_csat_lt_64_2,uint,8,8) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,16,4) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,32,2) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,8,16) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,16,8) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,32,4) = 1;
VECT_VAR_DECL(expected_lt_64_2,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff };
VECT_VAR_DECL(expected_lt_64_2,uint,16,4) [] = { 0xffff, 0xffff,
@@ -157,10 +114,10 @@ void vqadd_extras(void)
VDUP(vector2, q, uint, u, 64, 2, 0);
#define MSG "64 bits saturation adding zero"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64, MSG);
@@ -176,10 +133,10 @@ void vqadd_extras(void)
#undef MSG
#define MSG "64 bits saturation cumulative_sat (2)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64_2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64_2, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64_2, MSG);
@@ -198,10 +155,10 @@ void vqadd_extras(void)
#undef MSG
#define MSG "64 bits saturation cumulative_sat (3)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64_3, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64_3, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64_3, MSG);
@@ -219,12 +176,12 @@ void vqadd_extras(void)
#undef MSG
#define MSG "less than 64 bits saturation cumulative_sat (1)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_csat_lt_64_1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, MSG);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_lt_64_1, MSG);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_lt_64_1, MSG);
@@ -250,12 +207,12 @@ void vqadd_extras(void)
#undef MSG
#define MSG "less than 64 bits saturation cumulative_sat (2)"
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, expected_csat_lt_64_2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, MSG);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_lt_64_2, MSG);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_lt_64_2, MSG);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc
index cd61fd4..9724101 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl.inc
@@ -5,18 +5,17 @@ void FNNAME (INSN_NAME) (void)
{
/* vector_res = OP(vector, vector3, vector4),
then store the result. */
-#define TEST_VQDMLXL1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMLXL1(INSN, T1, T2, W, W2, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##_##T2##W2(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector3, T1, W2, N), \
VECT_VAR(vector4, T1, W2, N)); \
vst1q_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_VQDMLXL(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMLXL1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMLXL(INSN, T1, T2, W, W2, N, CMT) \
+ TEST_VQDMLXL1(INSN, T1, T2, W, W2, N, CMT)
DECL_VARIABLE(vector, int, 32, 4);
DECL_VARIABLE(vector3, int, 16, 4);
@@ -37,8 +36,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector3, , int, s, 32, 2, 0x55);
VDUP(vector4, , int, s, 32, 2, 0xBB);
- TEST_VQDMLXL(INSN_NAME, int, s, 32, 16, 4, expected_cumulative_sat, "");
- TEST_VQDMLXL(INSN_NAME, int, s, 64, 32, 2, expected_cumulative_sat, "");
+ TEST_VQDMLXL(INSN_NAME, int, s, 32, 16, 4, "");
+ TEST_VQDMLXL(INSN_NAME, int, s, 64, 32, 2, "");
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -49,8 +48,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector4, , int, s, 32, 2, 0x80000000);
#define TEST_MSG2 "with saturation"
- TEST_VQDMLXL(INSN_NAME, int, s, 32, 16, 4, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMLXL(INSN_NAME, int, s, 64, 32, 2, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMLXL(INSN_NAME, int, s, 32, 16, 4, TEST_MSG2);
+ TEST_VQDMLXL(INSN_NAME, int, s, 64, 32, 2, TEST_MSG2);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc
index 705f90a..8596acf 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_lane.inc
@@ -5,7 +5,7 @@ void FNNAME (INSN_NAME) (void)
{
/* vector_res = vqdmlXl_lane(vector, vector3, vector4, lane),
then store the result. */
-#define TEST_VQDMLXL_LANE1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMLXL_LANE1(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##_##T2##W2(VECT_VAR(vector, T1, W, N), \
@@ -13,11 +13,10 @@ void FNNAME (INSN_NAME) (void)
VECT_VAR(vector4, T1, W2, N), \
V); \
vst1q_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_VQDMLXL_LANE(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMLXL_LANE1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMLXL_LANE(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQDMLXL_LANE1(INSN, T1, T2, W, W2, N, V, CMT)
DECL_VARIABLE(vector, int, 32, 4);
DECL_VARIABLE(vector3, int, 16, 4);
@@ -39,8 +38,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector3, , int, s, 32, 2, 0x55);
VDUP(vector4, , int, s, 32, 2, 0xBB);
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, expected_cumulative_sat, "");
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, expected_cumulative_sat, "");
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, "");
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, "");
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -48,8 +47,8 @@ void FNNAME (INSN_NAME) (void)
#define TEST_MSG2 "(mul with input=0)"
VDUP(vector3, , int, s, 16, 4, 0);
VDUP(vector3, , int, s, 32, 2, 0);
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, TEST_MSG2);
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, TEST_MSG2);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
@@ -59,8 +58,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector3, , int, s, 32, 2, 0x80000000);
VDUP(vector4, , int, s, 16, 4, 0x8000);
VDUP(vector4, , int, s, 32, 2, 0x80000000);
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, expected_cumulative_sat3, TEST_MSG3);
- TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, expected_cumulative_sat3, TEST_MSG3);
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 32, 16, 4, 0, TEST_MSG3);
+ TEST_VQDMLXL_LANE(INSN_NAME, int, s, 64, 32, 2, 0, TEST_MSG3);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected3, TEST_MSG3);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected3, TEST_MSG3);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc
index fd885dd..5f89aa8 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlXl_n.inc
@@ -5,18 +5,17 @@ void FNNAME (INSN_NAME) (void)
{
/* vector_res = vqdmlxl_n(vector, vector3, val),
then store the result. */
-#define TEST_VQDMLXL_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMLXL_N1(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##_##T2##W2(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector3, T1, W2, N), \
V); \
vst1q_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_VQDMLXL_N(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMLXL_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMLXL_N(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQDMLXL_N1(INSN, T1, T2, W, W2, N, V, CMT)
DECL_VARIABLE(vector, int, 32, 4);
DECL_VARIABLE(vector3, int, 16, 4);
@@ -35,8 +34,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector3, , int, s, 32, 2, 0x55);
/* Choose val arbitrarily. */
- TEST_VQDMLXL_N(INSN_NAME, int, s, 32, 16, 4, 0x22, expected_cumulative_sat, "");
- TEST_VQDMLXL_N(INSN_NAME, int, s, 64, 32, 2, 0x33, expected_cumulative_sat, "");
+ TEST_VQDMLXL_N(INSN_NAME, int, s, 32, 16, 4, 0x22, "");
+ TEST_VQDMLXL_N(INSN_NAME, int, s, 64, 32, 2, 0x33, "");
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -45,8 +44,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector3, , int, s, 16, 4, 0x8000);
VDUP(vector3, , int, s, 32, 2, 0x80000000);
- TEST_VQDMLXL_N(INSN_NAME, int, s, 32, 16, 4, 0x8000, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMLXL_N(INSN_NAME, int, s, 64, 32, 2, 0x80000000, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMLXL_N(INSN_NAME, int, s, 32, 16, 4, 0x8000, TEST_MSG2);
+ TEST_VQDMLXL_N(INSN_NAME, int, s, 64, 32, 2, 0x80000000, TEST_MSG2);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c
index c53a90a..784047e 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal.c
@@ -5,19 +5,10 @@
#define INSN_NAME vqdmlal
#define TEST_MSG "VQDMLAL"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x7c1e, 0x7c1f, 0x7c20, 0x7c21 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0x7c1e, 0x7c1f };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffef, 0x7ffffff0,
0x7ffffff1, 0x7ffffff2 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c
index 832a705..2e641fd 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_lane.c
@@ -5,30 +5,16 @@
#define INSN_NAME vqdmlal_lane
#define TEST_MSG "VQDMLAL_LANE"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x7c1e, 0x7c1f, 0x7c20, 0x7c21 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0x7c1e, 0x7c1f };
-/* Expected values of cumulative_saturation flag when multiplying with
- 0. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 0;
-
/* Expected values when multiplying with 0. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
VECT_VAR_DECL(expected2,int,64,2) [] = { 0xfffffffffffffff0,
0xfffffffffffffff1 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR(expected_cumulative_sat3,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat3,int,64,2) = 1;
-
/* Expected values when multiplication saturates. */
VECT_VAR_DECL(expected3,int,32,4) [] = { 0x7fffffef, 0x7ffffff0,
0x7ffffff1, 0x7ffffff2 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c
index b84bca3..3356112 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlal_n.c
@@ -5,19 +5,10 @@
#define INSN_NAME vqdmlal_n
#define TEST_MSG "VQDMLAL_N"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x1684, 0x1685, 0x1686, 0x1687 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0x21ce, 0x21cf };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffef, 0x7ffffff0,
0x7ffffff1, 0x7ffffff2 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c
index 56e0b61..d65713c 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl.c
@@ -5,21 +5,12 @@
#define INSN_NAME vqdmlsl
#define TEST_MSG "VQDMLSL"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0xffff83c2, 0xffff83c3,
0xffff83c4, 0xffff83c5 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffff83c2,
0xffffffffffff83c3 };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c
index b95e61c..2180314 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_lane.c
@@ -5,32 +5,18 @@
#define INSN_NAME vqdmlsl_lane
#define TEST_MSG "VQDMLSL_LANE"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0xffff83c2, 0xffff83c3,
0xffff83c4, 0xffff83c5 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffff83c2,
0xffffffffffff83c3 };
-/* Expected values of cumulative_saturation flag when multiplying with
- 0. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 0;
-
/* Expected values when multiplying with 0. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
VECT_VAR_DECL(expected2,int,64,2) [] = { 0xfffffffffffffff0,
0xfffffffffffffff1 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR(expected_cumulative_sat3,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat3,int,64,2) = 1;
-
/* Expected values when multiplication saturates. */
VECT_VAR_DECL(expected3,int,32,4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c
index ff8d9d3..40a1929 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmlsl_n.c
@@ -5,21 +5,12 @@
#define INSN_NAME vqdmlsl_n
#define TEST_MSG "VQDMLSL_N"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffe95c, 0xffffe95d,
0xffffe95e, 0xffffe95f };
VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffffde12,
0xffffffffffffde13 };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,64,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c
index 8d2a365..e78590f 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh.c
@@ -2,12 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
VECT_VAR_DECL(expected,int,32,2) [] = { 0xffffffff, 0xffffffff };
@@ -16,13 +10,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected2,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -40,21 +27,20 @@ VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
void FNNAME (INSN_NAME) (void)
{
/* vector_res = vqdmulh(vector,vector2,lane), then store the result. */
-#define TEST_VQDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULH2(INSN, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH1(INSN, Q, T1, T2, W, N, CMT) \
+ TEST_VQDMULH2(INSN, Q, T1, T2, W, N, CMT)
-#define TEST_VQDMULH(Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH1(INSN_NAME, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH(Q, T1, T2, W, N, CMT) \
+ TEST_VQDMULH1(INSN_NAME, Q, T1, T2, W, N, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -84,10 +70,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, q, int, s, 16, 8, 0x33);
VDUP(vector2, q, int, s, 32, 4, 0x22);
- TEST_VQDMULH(, int, s, 16, 4, expected_cumulative_sat, "");
- TEST_VQDMULH(, int, s, 32, 2, expected_cumulative_sat, "");
- TEST_VQDMULH(q, int, s, 16, 8, expected_cumulative_sat, "");
- TEST_VQDMULH(q, int, s, 32, 4, expected_cumulative_sat, "");
+ TEST_VQDMULH(, int, s, 16, 4, "");
+ TEST_VQDMULH(, int, s, 32, 2, "");
+ TEST_VQDMULH(q, int, s, 16, 8, "");
+ TEST_VQDMULH(q, int, s, 32, 4, "");
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected, "");
@@ -104,10 +90,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, q, int, s, 32, 4, 0x80000000);
#define TEST_MSG2 "with saturation"
- TEST_VQDMULH(, int, s, 16, 4, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH(, int, s, 32, 2, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH(q, int, s, 16, 8, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH(q, int, s, 32, 4, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULH(, int, s, 16, 4, TEST_MSG2);
+ TEST_VQDMULH(, int, s, 32, 2, TEST_MSG2);
+ TEST_VQDMULH(q, int, s, 16, 8, TEST_MSG2);
+ TEST_VQDMULH(q, int, s, 32, 4, TEST_MSG2);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected2, TEST_MSG2);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c
index 5260676..d11c764 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_lane.c
@@ -2,12 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
VECT_VAR_DECL(expected,int,32,2) [] = { 0xffffffff, 0xffffffff };
@@ -16,13 +10,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected2,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -39,22 +26,21 @@ VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
void FNNAME (INSN_NAME) (void)
{
/* vector_res = vqdmulh_lane(vector,vector2,lane), then store the result. */
-#define TEST_VQDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_lane_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N2), \
L); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, CMT) \
+ TEST_VQDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, CMT)
-#define TEST_VQDMULH_LANE(Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH_LANE1(INSN_NAME, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH_LANE(Q, T1, T2, W, N, N2, L, CMT) \
+ TEST_VQDMULH_LANE1(INSN_NAME, Q, T1, T2, W, N, N2, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -84,10 +70,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0xBB);
/* Choose lane arbitrarily. */
- TEST_VQDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat, "");
- TEST_VQDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat, "");
- TEST_VQDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat, "");
- TEST_VQDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat, "");
+ TEST_VQDMULH_LANE(, int, s, 16, 4, 4, 2, "");
+ TEST_VQDMULH_LANE(, int, s, 32, 2, 2, 1, "");
+ TEST_VQDMULH_LANE(q, int, s, 16, 8, 4, 3, "");
+ TEST_VQDMULH_LANE(q, int, s, 32, 4, 2, 0, "");
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected, "");
@@ -103,10 +89,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0x80000000);
#define TEST_MSG2 " (check mul cumulative saturation)"
- TEST_VQDMULH_LANE(, int, s, 16, 4, 4, 3, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_LANE(q, int, s, 16, 8, 4, 2, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_LANE(q, int, s, 32, 4, 2, 1, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULH_LANE(, int, s, 16, 4, 4, 3, TEST_MSG2);
+ TEST_VQDMULH_LANE(, int, s, 32, 2, 2, 1, TEST_MSG2);
+ TEST_VQDMULH_LANE(q, int, s, 16, 8, 4, 2, TEST_MSG2);
+ TEST_VQDMULH_LANE(q, int, s, 32, 4, 2, 1, TEST_MSG2);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected2, TEST_MSG2);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c
index ab66e2d..7819a85 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmulh_n.c
@@ -2,12 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0x19, 0x19, 0x19, 0x19 };
VECT_VAR_DECL(expected,int,32,2) [] = { 0x4, 0x4 };
@@ -15,13 +9,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10 };
VECT_VAR_DECL(expected,int,32,4) [] = { 0xa, 0xa, 0xa, 0xa };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,4) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected2,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -40,21 +27,20 @@ void FNNAME (INSN_NAME) (void)
int i;
/* vector_res = vqdmulh_n(vector,val), then store the result. */
-#define TEST_VQDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULH_N2(INSN, Q, T1, T2, W, N, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
L); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULH_N1(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH_N1(INSN, Q, T1, T2, W, N, L, CMT) \
+ TEST_VQDMULH_N2(INSN, Q, T1, T2, W, N, L, CMT)
-#define TEST_VQDMULH_N(Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULH_N1(INSN_NAME, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULH_N(Q, T1, T2, W, N, L, CMT) \
+ TEST_VQDMULH_N1(INSN_NAME, Q, T1, T2, W, N, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -75,10 +61,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector, q, int, s, 32, 4, 0x100045);
/* Choose multiplier arbitrarily. */
- TEST_VQDMULH_N(, int, s, 16, 4, 0xCF, expected_cumulative_sat, "");
- TEST_VQDMULH_N(, int, s, 32, 2, 0x2344, expected_cumulative_sat, "");
- TEST_VQDMULH_N(q, int, s, 16, 8, 0x80, expected_cumulative_sat, "");
- TEST_VQDMULH_N(q, int, s, 32, 4, 0x5422, expected_cumulative_sat, "");
+ TEST_VQDMULH_N(, int, s, 16, 4, 0xCF, "");
+ TEST_VQDMULH_N(, int, s, 32, 2, 0x2344, "");
+ TEST_VQDMULH_N(q, int, s, 16, 8, 0x80, "");
+ TEST_VQDMULH_N(q, int, s, 32, 4, 0x5422, "");
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected, "");
@@ -92,10 +78,10 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector, q, int, s, 32, 4, 0x80000000);
#define TEST_MSG2 " (check mul cumulative saturation)"
- TEST_VQDMULH_N(, int, s, 16, 4, 0x8000, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_N(, int, s, 32, 2, 0x80000000, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_N(q, int, s, 16, 8, 0x8000, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULH_N(q, int, s, 32, 4, 0x80000000, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULH_N(, int, s, 16, 4, 0x8000, TEST_MSG2);
+ TEST_VQDMULH_N(, int, s, 32, 2, 0x80000000, TEST_MSG2);
+ TEST_VQDMULH_N(q, int, s, 16, 8, 0x8000, TEST_MSG2);
+ TEST_VQDMULH_N(q, int, s, 32, 4, 0x80000000, TEST_MSG2);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected2, TEST_MSG2);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull.c
index 5caebce..0845c1d 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull.c
@@ -2,19 +2,10 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x200, 0x1c2, 0x188, 0x152 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0x200, 0x1c2 };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
@@ -30,21 +21,20 @@ VECT_VAR_DECL(expected2,int,64,2) [] = { 0x7fffffffffffffff,
void FNNAME (INSN_NAME) (void)
{
/* Basic test: y=vqdmull(x,x), then store the result. */
-#define TEST_VQDMULL2(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULL2(INSN, T1, T2, W, W2, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
VECT_VAR(vector_res, T1, W2, N) = \
INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N)); \
vst1q_##T2##W2(VECT_VAR(result, T1, W2, N), \
- VECT_VAR(vector_res, T1, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W2, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULL1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL2(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL1(INSN, T1, T2, W, W2, N, CMT) \
+ TEST_VQDMULL2(INSN, T1, T2, W, W2, N, CMT)
-#define TEST_VQDMULL(T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL1(INSN_NAME, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL(T1, T2, W, W2, N, CMT) \
+ TEST_VQDMULL1(INSN_NAME, T1, T2, W, W2, N, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -60,8 +50,8 @@ void FNNAME (INSN_NAME) (void)
VLOAD(vector2, buffer, , int, s, 16, 4);
VLOAD(vector2, buffer, , int, s, 32, 2);
- TEST_VQDMULL(int, s, 16, 32, 4, expected_cumulative_sat, "");
- TEST_VQDMULL(int, s, 32, 64, 2, expected_cumulative_sat, "");
+ TEST_VQDMULL(int, s, 16, 32, 4, "");
+ TEST_VQDMULL(int, s, 32, 64, 2, "");
CHECK (TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK (TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -72,8 +62,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0x80000000);
#define TEST_MSG2 "with saturation"
- TEST_VQDMULL(int, s, 16, 32, 4, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULL(int, s, 32, 64, 2, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULL(int, s, 16, 32, 4, TEST_MSG2);
+ TEST_VQDMULL(int, s, 32, 64, 2, TEST_MSG2);
CHECK (TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK (TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c
index 12f2a6b..1f86011 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_lane.c
@@ -2,19 +2,10 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x8000, 0x8000, 0x8000, 0x8000 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0x4000, 0x4000 };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
@@ -32,22 +23,21 @@ void FNNAME (INSN_NAME) (void)
int i;
/* vector_res = vqdmull_lane(vector,vector2,lane), then store the result. */
-#define TEST_VQDMULL_LANE2(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULL_LANE2(INSN, T1, T2, W, W2, N, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
VECT_VAR(vector_res, T1, W2, N) = \
INSN##_lane_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N), \
L); \
vst1q_##T2##W2(VECT_VAR(result, T1, W2, N), \
- VECT_VAR(vector_res, T1, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W2, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULL_LANE1(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL_LANE2(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL_LANE1(INSN, T1, T2, W, W2, N, L, CMT) \
+ TEST_VQDMULL_LANE2(INSN, T1, T2, W, W2, N, L, CMT)
-#define TEST_VQDMULL_LANE(T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL_LANE1(INSN_NAME, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL_LANE(T1, T2, W, W2, N, L, CMT) \
+ TEST_VQDMULL_LANE1(INSN_NAME, T1, T2, W, W2, N, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -68,8 +58,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0x2);
/* Choose lane arbitrarily. */
- TEST_VQDMULL_LANE(int, s, 16, 32, 4, 2, expected_cumulative_sat, "");
- TEST_VQDMULL_LANE(int, s, 32, 64, 2, 1, expected_cumulative_sat, "");
+ TEST_VQDMULL_LANE(int, s, 16, 32, 4, 2, "");
+ TEST_VQDMULL_LANE(int, s, 32, 64, 2, 1, "");
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -80,8 +70,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0x80000000);
#define TEST_MSG2 "with saturation"
- TEST_VQDMULL_LANE(int, s, 16, 32, 4, 2, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULL_LANE(int, s, 32, 64, 2, 1, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULL_LANE(int, s, 16, 32, 4, 2, TEST_MSG2);
+ TEST_VQDMULL_LANE(int, s, 32, 64, 2, 1, TEST_MSG2);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c
index 9e73009..e618a36 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqdmull_n.c
@@ -2,20 +2,12 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
/* Expected results. */
VECT_VAR_DECL(expected,int,32,4) [] = { 0x44000, 0x44000,
0x44000, 0x44000 };
VECT_VAR_DECL(expected,int,64,2) [] = { 0xaa000, 0xaa000 };
-/* Expected values of cumulative_saturation flag when saturation
- occurs. */
-int VECT_VAR(expected_cumulative_sat2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat2,int,32,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected2,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
@@ -33,21 +25,20 @@ void FNNAME (INSN_NAME) (void)
int i;
/* vector_res = vqdmull_n(vector,val), then store the result. */
-#define TEST_VQDMULL_N2(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQDMULL_N2(INSN, T1, T2, W, W2, N, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
VECT_VAR(vector_res, T1, W2, N) = \
INSN##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
L); \
vst1q_##T2##W2(VECT_VAR(result, T1, W2, N), \
- VECT_VAR(vector_res, T1, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W2, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQDMULL_N1(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL_N2(INSN, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL_N1(INSN, T1, T2, W, W2, N, L, CMT) \
+ TEST_VQDMULL_N2(INSN, T1, T2, W, W2, N, L, CMT)
-#define TEST_VQDMULL_N(T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQDMULL_N1(INSN_NAME, T1, T2, W, W2, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQDMULL_N(T1, T2, W, W2, N, L, CMT) \
+ TEST_VQDMULL_N1(INSN_NAME, T1, T2, W, W2, N, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
DECL_VARIABLE(vector, int, 32, 2);
@@ -68,8 +59,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector2, , int, s, 32, 2, 0x2);
/* Choose multiplier arbitrarily. */
- TEST_VQDMULL_N(int, s, 16, 32, 4, 0x22, expected_cumulative_sat, "");
- TEST_VQDMULL_N(int, s, 32, 64, 2, 0x55, expected_cumulative_sat, "");
+ TEST_VQDMULL_N(int, s, 16, 32, 4, 0x22, "");
+ TEST_VQDMULL_N(int, s, 32, 64, 2, 0x55, "");
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
@@ -78,8 +69,8 @@ void FNNAME (INSN_NAME) (void)
VDUP(vector, , int, s, 32, 2, 0x80000000);
#define TEST_MSG2 "with saturation"
- TEST_VQDMULL_N(int, s, 16, 32, 4, 0x8000, expected_cumulative_sat2, TEST_MSG2);
- TEST_VQDMULL_N(int, s, 32, 64, 2, 0x80000000, expected_cumulative_sat2, TEST_MSG2);
+ TEST_VQDMULL_N(int, s, 16, 32, 4, 0x8000, TEST_MSG2);
+ TEST_VQDMULL_N(int, s, 32, 64, 2, 0x80000000, TEST_MSG2);
CHECK(TEST_MSG, int, 32, 4, PRIx32, expected2, TEST_MSG2);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected2, TEST_MSG2);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c
index 45c2db9..3cb874c 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c
@@ -2,14 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0x12, 0x12, 0x12, 0x12,
0x12, 0x12, 0x12, 0x12 };
@@ -20,14 +12,6 @@ VECT_VAR_DECL(expected,uint,8,8) [] = { 0x82, 0x82, 0x82, 0x82,
VECT_VAR_DECL(expected,uint,16,4) [] = { 0x8765, 0x8765, 0x8765, 0x8765 };
VECT_VAR_DECL(expected,uint,32,2) [] = { 0x87654321, 0x87654321 };
-/* Expected values of cumulative_saturation flag when saturation occurs. */
-int VECT_VAR(expected_cumulative_sat1,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat1,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat1,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat1,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat1,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat1,uint,32,2) = 1;
-
/* Expected results when saturation occurs. */
VECT_VAR_DECL(expected1,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -47,16 +31,15 @@ VECT_VAR_DECL(expected1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
FNNAME (INSN_NAME)
{
/* Basic test: y=OP(x), then store the result. */
-#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##_##T2##W2(VECT_VAR(vector, T1, W2, N)); \
vst1##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, CMT) \
+ TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, CMT)
/* No need for 64 bits variants. */
DECL_VARIABLE(vector, int, 16, 8);
@@ -85,12 +68,12 @@ FNNAME (INSN_NAME)
/* Apply a unary operator named INSN_NAME. */
#define CMT ""
- TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -112,12 +95,12 @@ FNNAME (INSN_NAME)
/* Apply a unary operator named INSN_NAME. */
#undef CMT
#define CMT " (with saturation)"
- TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, expected_cumulative_sat1, CMT);
- TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, expected_cumulative_sat1, CMT);
- TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, expected_cumulative_sat1, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat1, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat1, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected1, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected1, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c
index 1eeb4c8..f4c4e31 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c
@@ -2,22 +2,12 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,uint,8,8) [] = { 0x34, 0x34, 0x34, 0x34,
0x34, 0x34, 0x34, 0x34 };
VECT_VAR_DECL(expected,uint,16,4) [] = { 0x5678, 0x5678, 0x5678, 0x5678 };
VECT_VAR_DECL(expected,uint,32,2) [] = { 0x12345678, 0x12345678 };
-/* Expected values of cumulative_saturation flag with negative input. */
-int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 1;
-
/* Expected results with negative input. */
VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -33,16 +23,15 @@ VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
FNNAME (INSN_NAME)
{
/* Basic test: y=OP(x), then store the result. */
-#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##_s##W2(VECT_VAR(vector, int, W2, N)); \
vst1##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
-#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, CMT) \
+ TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, CMT)
DECL_VARIABLE(vector, int, 16, 8);
DECL_VARIABLE(vector, int, 32, 4);
@@ -61,9 +50,9 @@ FNNAME (INSN_NAME)
/* Apply a unary operator named INSN_NAME. */
#define CMT ""
- TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
@@ -77,9 +66,9 @@ FNNAME (INSN_NAME)
/* Apply a unary operator named INSN_NAME. */
#undef CMT
#define CMT " (negative input)"
- TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat_neg, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat_neg, CMT);
- TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat_neg, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqneg.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqneg.c
index 5126ac9..f700f28 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqneg.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqneg.c
@@ -19,14 +19,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
0xc, 0xb, 0xa, 0x9 };
VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results when input is the min negative value of the type. */
VECT_VAR_DECL(expected_min_neg,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -44,15 +36,6 @@ VECT_VAR_DECL(expected_min_neg,int,16,8) [] = { 0x7fff, 0x7fff,
VECT_VAR_DECL(expected_min_neg,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
-/* Expected values of cumulative_saturation flag when input is the min
- negative value of the type. */
-int VECT_VAR(expected_cumulative_sat_min_neg,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_min_neg,int,32,4) = 1;
-
void vqneg_extra()
{
/* No need for 64 bits variants. */
@@ -82,12 +65,12 @@ void vqneg_extra()
VDUP(vector, q, int, s, 32, 4, 0x80000000);
#define MSG "min negative input"
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_cumulative_sat_min_neg, MSG);
- TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_cumulative_sat_min_neg, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 8, 8, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 16, 4, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, , int, s, 32, 2, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, MSG);
+ TEST_UNARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, MSG);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_min_neg, MSG);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_min_neg, MSG);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc
index a504ca6..4b2b712 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh.inc
@@ -5,23 +5,21 @@ void FNNAME (INSN) (void)
{
/* vector_res = vqrdmlah (vector, vector2, vector3, vector4),
then store the result. */
-#define TEST_VQRDMLAH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRDMLAH2(INSN, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat (0, VECT_VAR (vector_res, T1, W, N)); \
VECT_VAR (vector_res, T1, W, N) = \
INSN##Q##_##T2##W (VECT_VAR (vector, T1, W, N), \
VECT_VAR (vector2, T1, W, N), \
VECT_VAR (vector3, T1, W, N)); \
vst1##Q##_##T2##W (VECT_VAR (result, T1, W, N), \
- VECT_VAR (vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT (TEST_MSG, T1, W, N, \
- EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR (vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN. */
-#define TEST_VQRDMLAH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMLAH2 (INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMLAH1(INSN, Q, T1, T2, W, N, CMT) \
+ TEST_VQRDMLAH2 (INSN, Q, T1, T2, W, N, CMT)
-#define TEST_VQRDMLAH(Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMLAH1 (INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMLAH(Q, T1, T2, W, N, CMT) \
+ TEST_VQRDMLAH1 (INSN, Q, T1, T2, W, N, CMT)
DECL_VARIABLE (vector, int, 16, 4);
DECL_VARIABLE (vector, int, 32, 2);
@@ -63,10 +61,10 @@ void FNNAME (INSN) (void)
VDUP (vector3, q, int, s, 32, 4, 0x22);
#define CMT ""
- TEST_VQRDMLAH ( , int, s, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQRDMLAH ( , int, s, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQRDMLAH (q, int, s, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQRDMLAH (q, int, s, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQRDMLAH ( , int, s, 16, 4, CMT);
+ TEST_VQRDMLAH ( , int, s, 32, 2, CMT);
+ TEST_VQRDMLAH (q, int, s, 16, 8, CMT);
+ TEST_VQRDMLAH (q, int, s, 32, 4, CMT);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
@@ -89,10 +87,10 @@ void FNNAME (INSN) (void)
VDUP (vector3, q, int, s, 16, 8, 0x8000);
VDUP (vector3, q, int, s, 32, 4, 0x80000000);
- TEST_VQRDMLAH ( , int, s, 16, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMLAH ( , int, s, 32, 2, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMLAH (q, int, s, 16, 8, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMLAH (q, int, s, 32, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
+ TEST_VQRDMLAH ( , int, s, 16, 4, TEST_MSG_MUL);
+ TEST_VQRDMLAH ( , int, s, 32, 2, TEST_MSG_MUL);
+ TEST_VQRDMLAH (q, int, s, 16, 8, TEST_MSG_MUL);
+ TEST_VQRDMLAH (q, int, s, 32, 4, TEST_MSG_MUL);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
@@ -115,14 +113,10 @@ void FNNAME (INSN) (void)
VDUP (vector3, q, int, s, 16, 8, 0x8001);
VDUP (vector3, q, int, s, 32, 4, 0x80000001);
- TEST_VQRDMLAH ( , int, s, 16, 4, expected_cumulative_sat_round, \
- TEST_MSG_ROUND);
- TEST_VQRDMLAH ( , int, s, 32, 2, expected_cumulative_sat_round, \
- TEST_MSG_ROUND);
- TEST_VQRDMLAH (q, int, s, 16, 8, expected_cumulative_sat_round, \
- TEST_MSG_ROUND);
- TEST_VQRDMLAH (q, int, s, 32, 4, expected_cumulative_sat_round, \
- TEST_MSG_ROUND);
+ TEST_VQRDMLAH ( , int, s, 16, 4, TEST_MSG_ROUND);
+ TEST_VQRDMLAH ( , int, s, 32, 2, TEST_MSG_ROUND);
+ TEST_VQRDMLAH (q, int, s, 16, 8, TEST_MSG_ROUND);
+ TEST_VQRDMLAH (q, int, s, 32, 4, TEST_MSG_ROUND);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc
index a855502..204a2e6 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlXh_lane.inc
@@ -6,7 +6,7 @@ void FNNAME (INSN) (void)
/* vector_res = vqrdmlXh_lane (vector, vector2, vector3, lane),
then store the result. */
#define TEST_VQRDMLXH_LANE2(INSN, Q, T1, T2, W, N, N2, L, \
- EXPECTED_CUMULATIVE_SAT, CMT) \
+ CMT) \
Set_Neon_Cumulative_Sat (0, VECT_VAR (vector_res, T1, W, N)); \
VECT_VAR (vector_res, T1, W, N) = \
INSN##Q##_lane_##T2##W (VECT_VAR (vector, T1, W, N), \
@@ -14,19 +14,18 @@ void FNNAME (INSN) (void)
VECT_VAR (vector3, T1, W, N2), \
L); \
vst1##Q##_##T2##W (VECT_VAR (result, T1, W, N), \
- VECT_VAR (vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT (TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR (vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN. */
#define TEST_VQRDMLXH_LANE1(INSN, Q, T1, T2, W, N, N2, L, \
- EXPECTED_CUMULATIVE_SAT, CMT) \
+ CMT) \
TEST_VQRDMLXH_LANE2 (INSN, Q, T1, T2, W, N, N2, L, \
- EXPECTED_CUMULATIVE_SAT, CMT)
+ CMT)
#define TEST_VQRDMLXH_LANE(Q, T1, T2, W, N, N2, L, \
- EXPECTED_CUMULATIVE_SAT, CMT) \
+ CMT) \
TEST_VQRDMLXH_LANE1 (INSN, Q, T1, T2, W, N, N2, L, \
- EXPECTED_CUMULATIVE_SAT, CMT)
+ CMT)
DECL_VARIABLE (vector, int, 16, 4);
@@ -71,10 +70,10 @@ void FNNAME (INSN) (void)
/* Choose lane arbitrarily. */
#define CMT ""
- TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, expected_cumulative_sat, CMT);
- TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, expected_cumulative_sat, CMT);
- TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, expected_cumulative_sat, CMT);
- TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, expected_cumulative_sat, CMT);
+ TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, CMT);
+ TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, CMT);
+ TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, CMT);
+ TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, CMT);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
@@ -99,14 +98,10 @@ void FNNAME (INSN) (void)
VDUP (vector3, q, int, s, 16, 8, 0x8000);
VDUP (vector3, q, int, s, 32, 4, 0x80000000);
- TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
+ TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, TEST_MSG_MUL);
+ TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, TEST_MSG_MUL);
+ TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, TEST_MSG_MUL);
+ TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, TEST_MSG_MUL);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
@@ -131,14 +126,10 @@ void FNNAME (INSN) (void)
/* Use input values where rounding produces a result equal to the
saturation value, but does not set the saturation flag. */
#define TEST_MSG_ROUND " (check rounding)"
- TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
+ TEST_VQRDMLXH_LANE (, int, s, 16, 4, 4, 2, TEST_MSG_ROUND);
+ TEST_VQRDMLXH_LANE (, int, s, 32, 2, 2, 1, TEST_MSG_ROUND);
+ TEST_VQRDMLXH_LANE (q, int, s, 16, 8, 4, 3, TEST_MSG_ROUND);
+ TEST_VQRDMLXH_LANE (q, int, s, 32, 4, 2, 0, TEST_MSG_ROUND);
CHECK (TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
CHECK (TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c
index 148d94c..71f20b7 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah.c
@@ -5,12 +5,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR (expected_cumulative_sat, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 4) = 0;
-
/* Expected results. */
VECT_VAR_DECL (expected, int, 16, 4) [] = { 0x38d3, 0x38d4, 0x38d5, 0x38d6 };
VECT_VAR_DECL (expected, int, 32, 2) [] = { 0xfffffff0, 0xfffffff1 };
@@ -19,13 +13,6 @@ VECT_VAR_DECL (expected, int, 16, 8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
VECT_VAR_DECL (expected, int, 32, 4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 4) = 0;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL (expected_mul, int, 16, 4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL (expected_mul, int, 32, 2) [] = { 0x0, 0x0 };
@@ -33,13 +20,6 @@ VECT_VAR_DECL (expected_mul, int, 16, 8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL (expected_mul, int, 32, 4) [] = { 0x0, 0x0, 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 4) = 0;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL (expected_round, int, 16, 4) [] = { 0xfffe, 0xfffe,
0xfffe, 0xfffe };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c
index ed43e01..158fb33 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlah_lane.c
@@ -5,12 +5,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR (expected_cumulative_sat, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 4) = 0;
-
/* Expected results. */
VECT_VAR_DECL (expected, int, 16, 4) [] = { 0x38d3, 0x38d4, 0x38d5, 0x38d6 };
VECT_VAR_DECL (expected, int, 32, 2) [] = { 0xfffffff0, 0xfffffff1 };
@@ -19,13 +13,6 @@ VECT_VAR_DECL (expected, int, 16, 8) [] = { 0x006d, 0x006e, 0x006f, 0x0070,
VECT_VAR_DECL (expected, int, 32, 4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 4) = 0;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL (expected_mul, int, 16, 4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL (expected_mul, int, 32, 2) [] = { 0x0, 0x0 };
@@ -33,13 +20,6 @@ VECT_VAR_DECL (expected_mul, int, 16, 8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL (expected_mul, int, 32, 4) [] = { 0x0, 0x0, 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 4) = 0;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL (expected_round, int, 16, 4) [] = { 0xfffe, 0xfffe,
0xfffe, 0xfffe };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c
index 91c3b34..280ae83 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh.c
@@ -5,12 +5,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR (expected_cumulative_sat, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 4) = 0;
-
/* Expected results. */
VECT_VAR_DECL (expected, int, 16, 4) [] = { 0xc70d, 0xc70e, 0xc70f, 0xc710 };
VECT_VAR_DECL (expected, int, 32, 2) [] = { 0xfffffff0, 0xfffffff1 };
@@ -19,13 +13,6 @@ VECT_VAR_DECL (expected, int, 16, 8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
VECT_VAR_DECL (expected, int, 32, 4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 4) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 2) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 8) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 4) = 1;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL (expected_mul, int, 16, 4) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
@@ -37,13 +24,6 @@ VECT_VAR_DECL (expected_mul, int, 16, 8) [] = { 0x8000, 0x8000,
VECT_VAR_DECL (expected_mul, int, 32, 4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 4) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 2) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 8) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 4) = 1;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL (expected_round, int, 16, 4) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c
index 6010b42..0207e4d 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmlsh_lane.c
@@ -5,12 +5,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR (expected_cumulative_sat, int, 16, 4) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 2) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 16, 8) = 0;
-int VECT_VAR (expected_cumulative_sat, int, 32, 4) = 0;
-
/* Expected results. */
VECT_VAR_DECL (expected, int, 16, 4) [] = { 0xc70d, 0xc70e, 0xc70f, 0xc710 };
VECT_VAR_DECL (expected, int, 32, 2) [] = { 0xfffffff0, 0xfffffff1 };
@@ -19,13 +13,6 @@ VECT_VAR_DECL (expected, int, 16, 8) [] = { 0xff73, 0xff74, 0xff75, 0xff76,
VECT_VAR_DECL (expected, int, 32, 4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 4) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 2) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 16, 8) = 1;
-int VECT_VAR (expected_cumulative_sat_mul, int, 32, 4) = 1;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL (expected_mul, int, 16, 4) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
@@ -37,13 +24,6 @@ VECT_VAR_DECL (expected_mul, int, 16, 8) [] = { 0x8000, 0x8000,
VECT_VAR_DECL (expected_mul, int, 32, 4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 4) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 2) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 16, 8) = 1;
-int VECT_VAR (expected_cumulative_sat_round, int, 32, 4) = 1;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL (expected_round, int, 16, 4) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c
index 915594a..5f4403b 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c
@@ -2,25 +2,12 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff5, 0xfff6, 0xfff7, 0xfff7 };
VECT_VAR_DECL(expected,int,32,2) [] = { 0x0, 0x0 };
VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -29,13 +16,6 @@ VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -53,21 +33,20 @@ VECT_VAR_DECL(expected_round,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
FNNAME (INSN)
{
/* vector_res = vqrdmulh(vector,vector2), then store the result. */
-#define TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, CMT) \
+ TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, CMT)
-#define TEST_VQRDMULH(Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH(Q, T1, T2, W, N, CMT) \
+ TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, CMT)
DECL_VARIABLE(vector, int, 16, 4);
@@ -99,10 +78,10 @@ FNNAME (INSN)
VDUP(vector2, q, int, s, 32, 4, 0x22);
#define CMT ""
- TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH(, int, s, 16, 4, CMT);
+ TEST_VQRDMULH(, int, s, 32, 2, CMT);
+ TEST_VQRDMULH(q, int, s, 16, 8, CMT);
+ TEST_VQRDMULH(q, int, s, 32, 4, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
@@ -121,10 +100,10 @@ FNNAME (INSN)
VDUP(vector2, q, int, s, 16, 8, 0x8000);
VDUP(vector2, q, int, s, 32, 4, 0x80000000);
- TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat_mul, TEST_MSG_MUL);
- TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
+ TEST_VQRDMULH(, int, s, 16, 4, TEST_MSG_MUL);
+ TEST_VQRDMULH(, int, s, 32, 2, TEST_MSG_MUL);
+ TEST_VQRDMULH(q, int, s, 16, 8, TEST_MSG_MUL);
+ TEST_VQRDMULH(q, int, s, 32, 4, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
@@ -143,10 +122,10 @@ FNNAME (INSN)
VDUP(vector2, q, int, s, 16, 8, 0x8001);
VDUP(vector2, q, int, s, 32, 4, 0x80000001);
- TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat_round, TEST_MSG_ROUND);
- TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat_round, TEST_MSG_ROUND);
- TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat_round, TEST_MSG_ROUND);
- TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat_round, TEST_MSG_ROUND);
+ TEST_VQRDMULH(, int, s, 16, 4, TEST_MSG_ROUND);
+ TEST_VQRDMULH(, int, s, 32, 2, TEST_MSG_ROUND);
+ TEST_VQRDMULH(q, int, s, 16, 8, TEST_MSG_ROUND);
+ TEST_VQRDMULH(q, int, s, 32, 4, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c
index 2235e74..02028cf 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c
@@ -2,12 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected,int,32,2) [] = { 0x0, 0x0 };
@@ -15,13 +9,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -30,13 +17,6 @@ VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -54,22 +34,21 @@ VECT_VAR_DECL(expected_round,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
FNNAME (INSN)
{
/* vector_res = vqrdmulh_lane(vector,vector2,lane), then store the result. */
-#define TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_lane_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector2, T1, W, N2), \
L); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, CMT) \
+ TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, CMT)
-#define TEST_VQRDMULH_LANE(Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH_LANE(Q, T1, T2, W, N, N2, L, CMT) \
+ TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
@@ -102,10 +81,10 @@ FNNAME (INSN)
/* Choose lane arbitrarily. */
#define CMT ""
- TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, CMT);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, CMT);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, CMT);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
@@ -122,14 +101,10 @@ FNNAME (INSN)
VDUP(vector2, , int, s, 16, 4, 0x8000);
VDUP(vector2, , int, s, 32, 2, 0x80000000);
- TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
@@ -146,14 +121,10 @@ FNNAME (INSN)
/* Use input values where rounding produces a result equal to the
saturation value, but does not set the saturation flag. */
#define TEST_MSG_ROUND " (check rounding)"
- TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c
index 7b43f71..fdcf7e4 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c
@@ -2,12 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,16,4) [] = { 0xfffc, 0xfffc, 0xfffc, 0xfffd };
VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffe, 0xfffffffe };
@@ -16,13 +10,6 @@ VECT_VAR_DECL(expected,int,16,8) [] = { 0x6, 0x6, 0x6, 0x5,
VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffe, 0xfffffffe,
0xfffffffe, 0xfffffffe };
-/* Expected values of cumulative_saturation flag when multiplication
- saturates. */
-int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
-
/* Expected results when multiplication saturates. */
VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -31,13 +18,6 @@ VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
0x7fffffff, 0x7fffffff };
-/* Expected values of cumulative_saturation flag when rounding
- should not cause saturation. */
-int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
-
/* Expected results when rounding should not cause saturation. */
VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
@@ -57,21 +37,20 @@ FNNAME (INSN)
int i;
/* vector_res = vqrdmulh_n(vector,val), then store the result. */
-#define TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
L); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, CMT) \
+ TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, CMT)
-#define TEST_VQRDMULH_N(Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRDMULH_N(Q, T1, T2, W, N, L, CMT) \
+ TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, CMT)
DECL_VARIABLE(vector, int, 16, 4);
@@ -93,10 +72,10 @@ FNNAME (INSN)
/* Choose multiplier arbitrarily. */
#define CMT ""
- TEST_VQRDMULH_N(, int, s, 16, 4, 0x2233, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_N(, int, s, 32, 2, 0x12345678, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_N(q, int, s, 16, 8, 0xCD12, expected_cumulative_sat, CMT);
- TEST_VQRDMULH_N(q, int, s, 32, 4, 0xFA23456, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x2233, CMT);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x12345678, CMT);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0xCD12, CMT);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0xFA23456, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
@@ -111,14 +90,10 @@ FNNAME (INSN)
VDUP(vector, q, int, s, 16, 8, 0x8000);
VDUP(vector, q, int, s, 32, 4, 0x80000000);
- TEST_VQRDMULH_N(, int, s, 16, 4, 0x8000, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000000, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8000, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
- TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000000, expected_cumulative_sat_mul,
- TEST_MSG_MUL);
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x8000, TEST_MSG_MUL);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000000, TEST_MSG_MUL);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8000, TEST_MSG_MUL);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000000, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
@@ -133,14 +108,10 @@ FNNAME (INSN)
VDUP(vector, q, int, s, 16, 8, 0x8000);
VDUP(vector, q, int, s, 32, 4, 0x80000000);
- TEST_VQRDMULH_N(, int, s, 16, 4, 0x8001, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000001, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8001, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
- TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000001, expected_cumulative_sat_round,
- TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x8001, TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000001, TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8001, TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000001, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c
index 0e194fa..f88b2fa 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c
@@ -2,24 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag with input=0. */
-int VECT_VAR(expected_cumulative_sat_0,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,64,2) = 0;
-
/* Expected results with input=0. */
VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -48,25 +30,6 @@ VECT_VAR_DECL(expected_0,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_0,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_0,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with input=0 and
- negative shift amount. */
-int VECT_VAR(expected_cumulative_sat_0_neg,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,2) = 0;
-
/* Expected results with input=0 and negative shift amount. */
VECT_VAR_DECL(expected_0_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -95,24 +58,6 @@ VECT_VAR_DECL(expected_0_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_0_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_0_neg,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
0xe8, 0xea, 0xec, 0xee };
@@ -144,25 +89,6 @@ VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_saturation flag with negative shift
- amount. */
-int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,64,2) = 0;
-
/* Expected results with negative shift amount. */
VECT_VAR_DECL(expected_neg,int,8,8) [] = { 0xfc, 0xfc, 0xfd, 0xfd,
0xfd, 0xfd, 0xfe, 0xfe };
@@ -192,25 +118,6 @@ VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x80000, 0x80000,
0x80000, 0x80000 };
VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0x100000000000, 0x100000000000 };
-/* Expected values of cumulative_saturation flag with input=max and
- shift by -1. */
-int VECT_VAR(expected_cumulative_sat_minus1,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus1,uint,64,2) = 0;
-
/* Expected results with input=max and shift by -1. */
VECT_VAR_DECL(expected_minus1,int,8,8) [] = { 0x40, 0x40, 0x40, 0x40,
0x40, 0x40, 0x40, 0x40 };
@@ -243,25 +150,6 @@ VECT_VAR_DECL(expected_minus1,uint,32,4) [] = { 0x80000000, 0x80000000,
VECT_VAR_DECL(expected_minus1,uint,64,2) [] = { 0x8000000000000000,
0x8000000000000000 };
-/* Expected values of cumulative_saturation flag with input=max and
- shift by -3. */
-int VECT_VAR(expected_cumulative_sat_minus3,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_minus3,uint,64,2) = 0;
-
/* Expected results with input=max and shift by -3. */
VECT_VAR_DECL(expected_minus3,int,8,8) [] = { 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10 };
@@ -294,25 +182,6 @@ VECT_VAR_DECL(expected_minus3,uint,32,4) [] = { 0x20000000, 0x20000000,
VECT_VAR_DECL(expected_minus3,uint,64,2) [] = { 0x2000000000000000,
0x2000000000000000 };
-/* Expected values of cumulative_saturation flag with input=max and
- large shift amount. */
-int VECT_VAR(expected_cumulative_sat_large_sh,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_large_sh,uint,64,2) = 1;
-
/* Expected results with input=max and large shift amount. */
VECT_VAR_DECL(expected_large_sh,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -351,25 +220,6 @@ VECT_VAR_DECL(expected_large_sh,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected_large_sh,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_saturation flag with negative input and
- large shift amount. */
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,64,2) = 1;
-
/* Expected results with negative input and large shift amount. */
VECT_VAR_DECL(expected_neg_large_sh,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80 };
@@ -411,25 +261,6 @@ VECT_VAR_DECL(expected_neg_large_sh,uint,32,4) [] = { 0xffffffff,
VECT_VAR_DECL(expected_neg_large_sh,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_saturation flag with max/min input and
- large negative shift amount. */
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,64,2) = 0;
-
/* Expected results with max/min input and large negative shift amount. */
VECT_VAR_DECL(expected_large_neg_sh,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -458,25 +289,6 @@ VECT_VAR_DECL(expected_large_neg_sh,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_large_neg_sh,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_large_neg_sh,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with input=0 and
- large negative shift amount. */
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,64,2) = 0;
-
/* Expected results with input=0 and large negative shift amount. */
VECT_VAR_DECL(expected_0_large_neg_sh,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -514,21 +326,20 @@ VECT_VAR_DECL(expected_0_large_neg_sh,uint,64,2) [] = { 0x0, 0x0 };
FNNAME (INSN)
{
/* Basic test: v3=vqrshl(v1,v2), then store the result. */
-#define TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector_shift, T3, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, CMT)
-#define TEST_VQRSHL(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHL(T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, CMT)
DECL_VARIABLE_ALL_VARIANTS(vector);
DECL_VARIABLE_ALL_VARIANTS(vector_res);
@@ -568,22 +379,22 @@ FNNAME (INSN)
VDUP(vector_shift, q, int, s, 64, 2, 64);
#define CMT " (with input = 0)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
@@ -615,22 +426,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (input 0 and negative shift amount)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0_neg, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0_neg, CMT);
@@ -665,22 +476,22 @@ FNNAME (INSN)
#undef CMT
#define CMT ""
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -712,22 +523,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (negative shift amount)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg, CMT);
@@ -779,22 +590,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: shift by -1)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_minus1, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_minus1, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_minus1, CMT);
@@ -827,22 +638,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: shift by -3)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_minus3, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_minus3, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_minus3, CMT);
@@ -874,22 +685,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: large shift amount)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_sh, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_sh, CMT);
@@ -932,22 +743,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: large shift amount with negative input)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg_large_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg_large_sh, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg_large_sh, CMT);
@@ -990,22 +801,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: large negative shift amount)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_neg_sh, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_neg_sh, CMT);
@@ -1048,22 +859,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (checking cumulative saturation: large negative shift amount with 0 input)"
- TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
- TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_neg_sh, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_neg_sh, CMT);
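[Note: with CHECK_CUMULATIVE_SAT removed, a TEST_VQRSHL invocation reduces to running the intrinsic and storing the result for the CHECK that follows; Set_Neon_Cumulative_Sat only clears the flag beforehand. Stripped of the VECT_VAR name mangling, the s8 variant is roughly the hand-written sketch below — illustrative only, not testsuite code:

#include <arm_neon.h>
#include <stdint.h>

void
sketch_test_vqrshl_s8 (int8_t *result, int8x8_t vector, int8x8_t vector_shift)
{
  /* Saturating rounding shift left by a per-lane signed amount; only
     the stored vector is compared against the expected_* tables, the
     QC flag is never read back.  */
  int8x8_t vector_res = vqrshl_s8 (vector, vector_shift);
  vst1_s8 (result, vector_res);
}
]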
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c
index 7bbcb85..6cf23d4 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c
@@ -2,14 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
0xfa, 0xfb, 0xfb, 0xfc };
@@ -20,14 +12,6 @@ VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag with shift by 3. */
-int VECT_VAR(expected_cumulative_sat_sh3,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_sh3,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_sh3,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_sh3,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_sh3,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_sh3,uint,64,2) = 1;
-
/* Expected results with shift by 3. */
VECT_VAR_DECL(expected_sh3,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -38,15 +22,6 @@ VECT_VAR_DECL(expected_sh3,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
VECT_VAR_DECL(expected_sh3,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
VECT_VAR_DECL(expected_sh3,uint,32,2) [] = { 0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag with shift by max
- amount. */
-int VECT_VAR(expected_cumulative_sat_shmax,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_shmax,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_shmax,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_shmax,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_shmax,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_shmax,uint,64,2) = 1;
-
/* Expected results with shift by max amount. */
VECT_VAR_DECL(expected_shmax,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -66,21 +41,20 @@ VECT_VAR_DECL(expected_shmax,uint,32,2) [] = { 0xffffffff, 0xffffffff };
FNNAME (INSN)
{
/* Basic test: y=vqrshrn_n(x,v), then store the result. */
-#define TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
VECT_VAR(vector_res, T1, W2, N) = \
INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1_##T2##W2(VECT_VAR(result, T1, W2, N), \
- VECT_VAR(vector_res, T1, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W2, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, CMT)
-#define TEST_VQRSHRN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHRN_N(T1, T2, W, W2, N, V, CMT) \
+ TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, CMT)
/* vector is twice as large as vector_res. */
@@ -109,12 +83,12 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT ""
- TEST_VQRSHRN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat, CMT);
- TEST_VQRSHRN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat, CMT);
- TEST_VQRSHRN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat, CMT);
- TEST_VQRSHRN_N(uint, u, 16, 8, 8, 2, expected_cumulative_sat, CMT);
- TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat, CMT);
- TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 1, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 1, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 2, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 2, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -134,12 +108,12 @@ FNNAME (INSN)
#undef CMT
#define CMT " (check saturation: shift by 3)"
- TEST_VQRSHRN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_sh3, CMT);
- TEST_VQRSHRN_N(int, s, 32, 16, 4, 3, expected_cumulative_sat_sh3, CMT);
- TEST_VQRSHRN_N(int, s, 64, 32, 2, 3, expected_cumulative_sat_sh3, CMT);
- TEST_VQRSHRN_N(uint, u, 16, 8, 8, 3, expected_cumulative_sat_sh3, CMT);
- TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat_sh3, CMT);
- TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 3, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 3, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 3, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 3, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_sh3, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_sh3, CMT);
@@ -152,12 +126,12 @@ FNNAME (INSN)
/* Shift by max amount. */
#undef CMT
#define CMT " (check saturation: shift by max)"
- TEST_VQRSHRN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_shmax, CMT);
- TEST_VQRSHRN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_shmax, CMT);
- TEST_VQRSHRN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_shmax, CMT);
- TEST_VQRSHRN_N(uint, u, 16, 8, 8, 8, expected_cumulative_sat_shmax, CMT);
- TEST_VQRSHRN_N(uint, u, 32, 16, 4, 16, expected_cumulative_sat_shmax, CMT);
- TEST_VQRSHRN_N(uint, u, 64, 32, 2, 32, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 8, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 16, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 32, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 8, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 16, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 32, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_shmax, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_shmax, CMT);
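[Note: the same simplification applies to the narrowing form, where the result has half-width elements and the shift amount is an immediate. A rough equivalent of one invocation, with invented names, would be:

#include <arm_neon.h>
#include <stdint.h>

void
sketch_test_vqrshrn_n_s16 (int8_t *result, int16x8_t vector)
{
  /* Saturating rounding narrowing shift right by 1, matching the
     "arbitrary shift amount" case above; CHECK then compares the
     stored bytes against expected[].  */
  int8x8_t vector_res = vqrshrn_n_s16 (vector, 1);
  vst1_s8 (result, vector_res);
}
]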
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c
index f5e431e..5d4c493 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c
@@ -2,23 +2,12 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag with negative unput. */
-int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
-
/* Expected results with negative input. */
VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with max input value
- shifted by 1. */
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,64,2) = 1;
-
/* Expected results with max input value shifted by 1. */
VECT_VAR_DECL(expected_max_sh1,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff };
@@ -27,12 +16,6 @@ VECT_VAR_DECL(expected_max_sh1,uint,16,4) [] = { 0xffff, 0xffff,
VECT_VAR_DECL(expected_max_sh1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
VECT_VAR_DECL(expected_max_sh1,uint,64,1) [] = { 0x3333333333333333 };
-/* Expected values of cumulative_saturation flag with max input value
- shifted by max amount. */
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,64,2) = 0;
-
/* Expected results with max input value shifted by max amount. */
VECT_VAR_DECL(expected_max_shmax,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80 };
@@ -40,24 +23,12 @@ VECT_VAR_DECL(expected_max_shmax,uint,16,4) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
VECT_VAR_DECL(expected_max_shmax,uint,32,2) [] = { 0x80000000, 0x80000000 };
-/* Expected values of cumulative_saturation flag with min input value
- shifted by max amount. */
-int VECT_VAR(expected_cumulative_sat_min_shmax,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_min_shmax,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_min_shmax,int,64,2) = 1;
-
/* Expected results with min input value shifted by max amount. */
VECT_VAR_DECL(expected_min_shmax,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_min_shmax,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_min_shmax,uint,32,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with inputs in usual
- range. */
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results with inputs in usual range. */
VECT_VAR_DECL(expected,uint,8,8) [] = { 0x49, 0x49, 0x49, 0x49,
0x49, 0x49, 0x49, 0x49 };
@@ -73,21 +44,20 @@ VECT_VAR_DECL(expected,uint,32,2) [] = { 0xdeadbf, 0xdeadbf };
FNNAME (INSN)
{
/* Basic test: y=vqrshrun_n(x,v), then store the result. */
-#define TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, uint, W2, N)); \
VECT_VAR(vector_res, uint, W2, N) = \
INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1_u##W2(VECT_VAR(result, uint, W2, N), \
- VECT_VAR(vector_res, uint, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, uint, W2, N))
/* Two auxliary macros are necessary to expand INSN */
-#define TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, CMT)
-#define TEST_VQRSHRUN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQRSHRUN_N(T1, T2, W, W2, N, V, CMT) \
+ TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, CMT)
/* vector is twice as large as vector_res. */
@@ -109,9 +79,9 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT " (negative input)"
- TEST_VQRSHRUN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHRUN_N(int, s, 32, 16, 4, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQRSHRUN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 3, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 4, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
@@ -127,9 +97,9 @@ FNNAME (INSN)
/* shift by 1. */
#undef CMT
#define CMT " (check cumulative saturation: shift by 1)"
- TEST_VQRSHRUN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat_max_sh1, CMT);
- TEST_VQRSHRUN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat_max_sh1, CMT);
- TEST_VQRSHRUN_N(int, s, 64, 32, 2, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 1, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 1, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 1, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh1, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh1, CMT);
@@ -139,9 +109,9 @@ FNNAME (INSN)
/* shift by max. */
#undef CMT
#define CMT " (check cumulative saturation: shift by max, positive input)"
- TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_shmax, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_shmax, CMT);
@@ -156,9 +126,9 @@ FNNAME (INSN)
/* shift by max */
#undef CMT
#define CMT " (check cumulative saturation: shift by max, negative input)"
- TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_min_shmax, CMT);
- TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_min_shmax, CMT);
- TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_min_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_min_shmax, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_min_shmax, CMT);
@@ -173,9 +143,9 @@ FNNAME (INSN)
/* shift arbitrary amount. */
#undef CMT
#define CMT ""
- TEST_VQRSHRUN_N(int, s, 16, 8, 8, 6, expected_cumulative_sat, CMT);
- TEST_VQRSHRUN_N(int, s, 32, 16, 4, 7, expected_cumulative_sat, CMT);
- TEST_VQRSHRUN_N(int, s, 64, 32, 2, 8, expected_cumulative_sat, CMT);
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 6, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 7, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 8, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
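[Note: for the signed-to-unsigned variant, the negative-input case above clamps every lane to zero, which is exactly what expected_neg encodes. A hedged sketch of one such call, again with illustrative names:

#include <arm_neon.h>
#include <stdint.h>

void
sketch_test_vqrshrun_n_s16 (uint8_t *result, int16x8_t vector)
{
  /* Signed input, unsigned narrowed output: negative lanes saturate to
     0 (setting QC, which is no longer checked) and positive overflow
     saturates to 0xff.  */
  uint8x8_t vector_res = vqrshrun_n_s16 (vector, 3);
  vst1_u8 (result, vector_res);
}
]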
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c
index 5999b12..7bf20d4 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c
@@ -2,23 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag with input=0. */
-int VECT_VAR(expected_cumulative_sat_0,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0,uint,64,2) = 0;
/* Expected results with input=0. */
VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
@@ -48,25 +31,6 @@ VECT_VAR_DECL(expected_0,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_0,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_0,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with input=0 and
- negative shift amount. */
-int VECT_VAR(expected_cumulative_sat_0_neg,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,2) = 0;
-
/* Expected results with input=0 and negative shift amount. */
VECT_VAR_DECL(expected_0_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -95,24 +59,6 @@ VECT_VAR_DECL(expected_0_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_0_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_0_neg,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
0xe8, 0xea, 0xec, 0xee };
@@ -145,25 +91,6 @@ VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_sat_saturation flag with negative shift
- amount. */
-int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_neg,uint,64,2) = 0;
-
/* Expected results with negative shift amount. */
VECT_VAR_DECL(expected_neg,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
0xfa, 0xfa, 0xfb, 0xfb };
@@ -195,25 +122,6 @@ VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x7ffff, 0x7ffff,
0x7ffff, 0x7ffff };
VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0xfffffffffff, 0xfffffffffff };
-/* Expected values of cumulative_sat_saturation flag with negative
- input and large shift amount. */
-int VECT_VAR(expected_cumulative_sat_neg_large,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg_large,uint,64,2) = 1;
-
/* Expected results with negative input and large shift amount. */
VECT_VAR_DECL(expected_neg_large,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80 };
@@ -252,25 +160,6 @@ VECT_VAR_DECL(expected_neg_large,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected_neg_large,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_sat_saturation flag with max input
- and shift by -1. */
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_minus1,uint,64,2) = 0;
-
/* Expected results with max input and shift by -1. */
VECT_VAR_DECL(expected_max_minus1,int,8,8) [] = { 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x3f, 0x3f };
@@ -309,25 +198,6 @@ VECT_VAR_DECL(expected_max_minus1,uint,32,4) [] = { 0x7fffffff, 0x7fffffff,
VECT_VAR_DECL(expected_max_minus1,uint,64,2) [] = { 0x7fffffffffffffff,
0x7fffffffffffffff };
-/* Expected values of cumulative_sat_saturation flag with max input
- and large shift amount. */
-int VECT_VAR(expected_cumulative_sat_max_large,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_large,uint,64,2) = 1;
-
/* Expected results with max input and large shift amount. */
VECT_VAR_DECL(expected_max_large,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -366,11 +236,6 @@ VECT_VAR_DECL(expected_max_large,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected_max_large,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_sat_saturation flag with saturation
- on 64-bits values. */
-int VECT_VAR(expected_cumulative_sat_64,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64,int,64,2) = 1;
-
/* Expected results with saturation on 64-bit values. */
VECT_VAR_DECL(expected_64,int,64,1) [] = { 0x8000000000000000 };
VECT_VAR_DECL(expected_64,int,64,2) [] = { 0x7fffffffffffffff,
@@ -385,21 +250,20 @@ VECT_VAR_DECL(expected_64,int,64,2) [] = { 0x7fffffffffffffff,
FNNAME (INSN)
{
/* Basic test: v3=vqshl(v1,v2), then store the result. */
-#define TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
VECT_VAR(vector_shift, T3, W, N)); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N));
/* Two auxiliary macros are necessary to expand INSN */
-#define TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, CMT)
-#define TEST_VQSHL(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHL(T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, CMT)
DECL_VARIABLE_ALL_VARIANTS(vector);
@@ -470,22 +334,22 @@ FNNAME (INSN)
VSET_LANE(vector_shift, q, int, s, 64, 2, 1, 62);
#define CMT " (with input = 0)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
@@ -517,22 +381,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (input 0 and negative shift amount)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0_neg, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0_neg, CMT);
@@ -566,22 +430,22 @@ FNNAME (INSN)
#undef CMT
#define CMT ""
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -613,22 +477,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (negative shift amount)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg, CMT);
@@ -660,22 +524,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (large shift amount, negative input)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg_large, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg_large, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg_large, CMT);
@@ -725,22 +589,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (max input, shift by -1)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_max_minus1, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_minus1, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_minus1, CMT);
@@ -772,22 +636,22 @@ FNNAME (INSN)
#undef CMT
#define CMT " (max input, large shift amount)"
- TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_max_large, CMT);
- TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , int, s, 8, 8, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_large, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_large, CMT);
@@ -815,8 +679,8 @@ FNNAME (INSN)
#undef CMT
#define CMT " (check saturation on 64 bits)"
- TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_64, CMT);
- TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_64, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, CMT);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64, CMT);
CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_64, CMT);
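Each of the test files touched here repeats the same three-level macro chain (TEST_VQSHL -> TEST_VQSHL1 -> TEST_VQSHL2 and its per-intrinsic counterparts) together with the note that two auxiliary macros are necessary to expand INSN. The stand-alone sketch below is purely illustrative and not part of this patch; the names INSN, CALL, CALL1, CALL2, vqshl_s8 and vqshl_u16 are made up for the example. It shows why the extra indirection is needed: a macro parameter that is an operand of ## is not macro-expanded, so the token pasting has to happen one level below the macro that receives INSN.

#include <stdio.h>

#define INSN vqshl

/* The innermost macro does the actual token pasting.  */
#define CALL2(name, suffix) name##_##suffix()
/* This middle level exists only so that 'name' (here INSN) is fully
   expanded to 'vqshl' before CALL2 pastes it.  */
#define CALL1(name, suffix) CALL2(name, suffix)
/* Callers use the outer macro, mirroring TEST_VQSHL in the tests.  */
#define CALL(suffix) CALL1(INSN, suffix)

static int vqshl_s8(void)  { return 8; }
static int vqshl_u16(void) { return 16; }

int main(void)
{
  /* CALL(s8) expands to vqshl_s8(); CALL(u16) expands to vqshl_u16().  */
  printf("%d %d\n", CALL(s8), CALL(u16));
  return 0;
}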
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c
index cb9c458..602bc36 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c
@@ -2,24 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0xc0, 0xc4, 0xc8, 0xcc,
0xd0, 0xd4, 0xd8, 0xdc };
@@ -51,24 +33,6 @@ VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_saturation flag with max positive input. */
-int VECT_VAR(expected_cumulative_sat_max,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max,uint,64,2) = 1;
-
/* Expected results with max positive input. */
VECT_VAR_DECL(expected_max,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -110,21 +74,20 @@ VECT_VAR_DECL(expected_max,uint,64,2) [] = { 0xffffffffffffffff,
FNNAME (INSN)
{
/* Basic test: v2=vqshl_n(v1,v), then store the result. */
-#define TEST_VQSHL_N2(INSN, Q, T1, T2, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQSHL_N2(INSN, Q, T1, T2, W, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
VECT_VAR(vector_res, T1, W, N) = \
INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
- VECT_VAR(vector_res, T1, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W, N))
/* Two auxiliary macros are necessary to expand INSN */
-#define TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHL_N2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQSHL_N2(INSN, T3, Q, T1, T2, W, N, CMT)
-#define TEST_VQSHL_N(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHL_N(T3, Q, T1, T2, W, N, CMT) \
+ TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, CMT)
DECL_VARIABLE_ALL_VARIANTS(vector);
DECL_VARIABLE_ALL_VARIANTS(vector_res);
@@ -135,23 +98,23 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT ""
- TEST_VQSHL_N(, int, s, 8, 8, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, int, s, 16, 4, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, int, s, 32, 2, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, int, s, 64, 1, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, uint, u, 8, 8, 3, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, uint, u, 16, 4, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, uint, u, 32, 2, 3, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(, uint, u, 64, 1, 3, expected_cumulative_sat, CMT);
-
- TEST_VQSHL_N(q, int, s, 8, 16, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, int, s, 16, 8, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, int, s, 32, 4, 1, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, int, s, 64, 2, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, uint, u, 8, 16, 3, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, uint, u, 16, 8, 2, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, uint, u, 32, 4, 3, expected_cumulative_sat, CMT);
- TEST_VQSHL_N(q, uint, u, 64, 2, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, int, s, 8, 8, 2, CMT);
+ TEST_VQSHL_N(, int, s, 16, 4, 1, CMT);
+ TEST_VQSHL_N(, int, s, 32, 2, 1, CMT);
+ TEST_VQSHL_N(, int, s, 64, 1, 2, CMT);
+ TEST_VQSHL_N(, uint, u, 8, 8, 3, CMT);
+ TEST_VQSHL_N(, uint, u, 16, 4, 2, CMT);
+ TEST_VQSHL_N(, uint, u, 32, 2, 3, CMT);
+ TEST_VQSHL_N(, uint, u, 64, 1, 3, CMT);
+
+ TEST_VQSHL_N(q, int, s, 8, 16, 2, CMT);
+ TEST_VQSHL_N(q, int, s, 16, 8, 1, CMT);
+ TEST_VQSHL_N(q, int, s, 32, 4, 1, CMT);
+ TEST_VQSHL_N(q, int, s, 64, 2, 2, CMT);
+ TEST_VQSHL_N(q, uint, u, 8, 16, 3, CMT);
+ TEST_VQSHL_N(q, uint, u, 16, 8, 2, CMT);
+ TEST_VQSHL_N(q, uint, u, 32, 4, 3, CMT);
+ TEST_VQSHL_N(q, uint, u, 64, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -191,23 +154,23 @@ FNNAME (INSN)
#undef CMT
#define CMT " (with max input)"
- TEST_VQSHL_N(, int, s, 8, 8, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, int, s, 16, 4, 1, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, int, s, 32, 2, 1, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, int, s, 64, 1, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, uint, u, 8, 8, 3, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, uint, u, 16, 4, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, uint, u, 32, 2, 3, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(, uint, u, 64, 1, 3, expected_cumulative_sat_max, CMT);
-
- TEST_VQSHL_N(q, int, s, 8, 16, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, int, s, 16, 8, 1, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, int, s, 32, 4, 1, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, int, s, 64, 2, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, uint, u, 8, 16, 3, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, uint, u, 16, 8, 2, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, uint, u, 32, 4, 3, expected_cumulative_sat_max, CMT);
- TEST_VQSHL_N(q, uint, u, 64, 2, 3, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, int, s, 8, 8, 2, CMT);
+ TEST_VQSHL_N(, int, s, 16, 4, 1, CMT);
+ TEST_VQSHL_N(, int, s, 32, 2, 1, CMT);
+ TEST_VQSHL_N(, int, s, 64, 1, 2, CMT);
+ TEST_VQSHL_N(, uint, u, 8, 8, 3, CMT);
+ TEST_VQSHL_N(, uint, u, 16, 4, 2, CMT);
+ TEST_VQSHL_N(, uint, u, 32, 2, 3, CMT);
+ TEST_VQSHL_N(, uint, u, 64, 1, 3, CMT);
+
+ TEST_VQSHL_N(q, int, s, 8, 16, 2, CMT);
+ TEST_VQSHL_N(q, int, s, 16, 8, 1, CMT);
+ TEST_VQSHL_N(q, int, s, 32, 4, 1, CMT);
+ TEST_VQSHL_N(q, int, s, 64, 2, 2, CMT);
+ TEST_VQSHL_N(q, uint, u, 8, 16, 3, CMT);
+ TEST_VQSHL_N(q, uint, u, 16, 8, 2, CMT);
+ TEST_VQSHL_N(q, uint, u, 32, 4, 3, CMT);
+ TEST_VQSHL_N(q, uint, u, 64, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c
index a357fbe..6ebb702 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c
@@ -2,17 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag with negative
- input. */
-int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
-
/* Expected results with negative input. */
VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -28,16 +17,6 @@ VECT_VAR_DECL(expected_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with shift by 1. */
-int VECT_VAR(expected_cumulative_sat_sh1,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_sh1,int,64,2) = 0;
-
/* Expected results with shift by 1. */
VECT_VAR_DECL(expected_sh1,uint,8,8) [] = { 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe };
@@ -55,16 +34,6 @@ VECT_VAR_DECL(expected_sh1,uint,32,4) [] = { 0xfffffffe, 0xfffffffe,
VECT_VAR_DECL(expected_sh1,uint,64,2) [] = { 0xfffffffffffffffe,
0xfffffffffffffffe };
-/* Expected values of cumulative_saturation flag with shift by 2. */
-int VECT_VAR(expected_cumulative_sat_sh2,int,8,8) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,16,4) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,32,2) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,8,16) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_sh2,int,64,2) = 1;
-
/* Expected results with shift by 2. */
VECT_VAR_DECL(expected_sh2,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff };
@@ -82,16 +51,6 @@ VECT_VAR_DECL(expected_sh2,uint,32,4) [] = { 0xffffffff, 0xffffffff,
VECT_VAR_DECL(expected_sh2,uint,64,2) [] = { 0xffffffffffffffff,
0xffffffffffffffff };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,uint,8,8) [] = { 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2 };
VECT_VAR_DECL(expected,uint,16,4) [] = { 0x8, 0x8, 0x8, 0x8 };
@@ -116,21 +75,20 @@ VECT_VAR_DECL(expected,uint,64,2) [] = { 0x800, 0x800 };
FNNAME (INSN)
{
/* Basic test: v2=vqshlu_n(v1,v), then store the result. */
-#define TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T3, W, N)); \
VECT_VAR(vector_res, T3, W, N) = \
INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1##Q##_##T4##W(VECT_VAR(result, T3, W, N), \
- VECT_VAR(vector_res, T3, W, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T3, W, N))
/* Two auxiliary macros are necessary to expand INSN */
-#define TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, CMT) \
+ TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, CMT)
-#define TEST_VQSHLU_N(Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHLU_N(Q, T1, T2, T3, T4, W, N, V, CMT) \
+ TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, CMT)
DECL_VARIABLE_ALL_VARIANTS(vector);
@@ -151,14 +109,14 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT " (negative input)"
- TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, expected_cumulative_sat_neg, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
@@ -184,14 +142,14 @@ FNNAME (INSN)
/* shift by 1. */
#undef CMT
#define CMT " (shift by 1)"
- TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, expected_cumulative_sat_sh1, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 1, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh1, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh1, CMT);
@@ -205,14 +163,14 @@ FNNAME (INSN)
/* shift by 2 to force saturation. */
#undef CMT
#define CMT " (shift by 2)"
- TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 2, expected_cumulative_sat_sh2, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh2, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh2, CMT);
@@ -237,14 +195,14 @@ FNNAME (INSN)
/* Arbitrary shift amount. */
#undef CMT
#define CMT ""
- TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 3, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 4, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 5, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 6, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 7, expected_cumulative_sat, CMT);
- TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 3, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 4, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 5, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 6, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 7, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 8, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c
index b3556f4..026a501 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c
@@ -2,14 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
-
/* Expected results. */
VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
0xfa, 0xfa, 0xfb, 0xfb };
@@ -20,15 +12,6 @@ VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag with max input value
- shifted by 3. */
-int VECT_VAR(expected_cumulative_sat_max_sh3,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh3,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh3,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh3,uint,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh3,uint,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh3,uint,64,2) = 1;
-
/* Expected results with max input value shifted by 3. */
VECT_VAR_DECL(expected_max_sh3,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -40,15 +23,6 @@ VECT_VAR_DECL(expected_max_sh3,uint,16,4) [] = { 0xffff, 0xffff,
0xffff, 0xffff };
VECT_VAR_DECL(expected_max_sh3,uint,32,2) [] = { 0xffffffff, 0xffffffff };
-/* Expected values of cumulative_saturation flag with max input value
- shifted by type size. */
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat_max_shmax,uint,64,2) = 0;
-
/* Expected results with max input value shifted by type size. */
VECT_VAR_DECL(expected_max_shmax,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f };
@@ -70,21 +44,20 @@ VECT_VAR_DECL(expected_max_shmax,uint,32,2) [] = { 0xffffffff, 0xffffffff };
FNNAME (INSN)
{
/* Basic test: y=vqshrn_n(x,v), then store the result. */
-#define TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
VECT_VAR(vector_res, T1, W2, N) = \
INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1_##T2##W2(VECT_VAR(result, T1, W2, N), \
- VECT_VAR(vector_res, T1, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, T1, W2, N))
/* Two auxiliary macros are necessary to expand INSN */
-#define TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, CMT)
-#define TEST_VQSHRN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHRN_N(T1, T2, W, W2, N, V, CMT) \
+ TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, CMT)
/* vector is twice as large as vector_res. */
@@ -113,12 +86,12 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT ""
- TEST_VQSHRN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat, CMT);
- TEST_VQSHRN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat, CMT);
- TEST_VQSHRN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat, CMT);
- TEST_VQSHRN_N(uint, u, 16, 8, 8, 2, expected_cumulative_sat, CMT);
- TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat, CMT);
- TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 1, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 1, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 2, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 2, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
@@ -138,12 +111,12 @@ FNNAME (INSN)
#undef CMT
#define CMT " (check saturation: shift by 3)"
- TEST_VQSHRN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_max_sh3, CMT);
- TEST_VQSHRN_N(int, s, 32, 16, 4, 3, expected_cumulative_sat_max_sh3, CMT);
- TEST_VQSHRN_N(int, s, 64, 32, 2, 3, expected_cumulative_sat_max_sh3, CMT);
- TEST_VQSHRN_N(uint, u, 16, 8, 8, 3, expected_cumulative_sat_max_sh3, CMT);
- TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat_max_sh3, CMT);
- TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 3, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 3, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 3, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 3, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh3, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh3, CMT);
@@ -155,12 +128,12 @@ FNNAME (INSN)
#undef CMT
#define CMT " (check saturation: shift by max)"
- TEST_VQSHRN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQSHRN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQSHRN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQSHRN_N(uint, u, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQSHRN_N(uint, u, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
- TEST_VQSHRN_N(uint, u, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 8, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 16, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 32, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 8, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 16, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 32, CMT);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_shmax, CMT);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_shmax, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c
index ce1a3ff..c0d95e9 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c
@@ -2,10 +2,6 @@
#include "arm-neon-ref.h"
#include "compute-ref-data.h"
-/* Expected values of cumulative_saturation flag with negative input. */
-int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
/* Expected results with negative input. */
VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
@@ -13,12 +9,6 @@ VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
-/* Expected values of cumulative_saturation flag with max input value
- shifted by 1. */
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,16,8) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat_max_sh1,int,64,2) = 1;
-
/* Expected results with max input value shifted by 1. */
VECT_VAR_DECL(expected_max_sh1,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff };
@@ -27,11 +17,6 @@ VECT_VAR_DECL(expected_max_sh1,uint,16,4) [] = { 0xffff, 0xffff,
VECT_VAR_DECL(expected_max_sh1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
VECT_VAR_DECL(expected_max_sh1,uint,64,1) [] = { 0x3333333333333333 };
-/* Expected values of cumulative_saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-
/* Expected results. */
VECT_VAR_DECL(expected,uint,8,8) [] = { 0x48, 0x48, 0x48, 0x48,
0x48, 0x48, 0x48, 0x48 };
@@ -48,21 +33,20 @@ VECT_VAR_DECL(expected,uint,32,2) [] = { 0xdeadbe, 0xdeadbe };
FNNAME (INSN)
{
/* Basic test: y=vqshrun_n(x,v), then store the result. */
-#define TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+#define TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, CMT) \
Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, uint, W2, N)); \
VECT_VAR(vector_res, uint, W2, N) = \
INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
V); \
vst1_u##W2(VECT_VAR(result, uint, W2, N), \
- VECT_VAR(vector_res, uint, W2, N)); \
- CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+ VECT_VAR(vector_res, uint, W2, N));
/* Two auxiliary macros are necessary to expand INSN */
-#define TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, CMT) \
+ TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, CMT)
-#define TEST_VQSHRUN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
- TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+#define TEST_VQSHRUN_N(T1, T2, W, W2, N, V, CMT) \
+ TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, CMT)
/* vector is twice as large as vector_res. */
@@ -84,9 +68,9 @@ FNNAME (INSN)
/* Choose shift amount arbitrarily. */
#define CMT " (negative input)"
- TEST_VQSHRUN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_neg, CMT);
- TEST_VQSHRUN_N(int, s, 32, 16, 4, 4, expected_cumulative_sat_neg, CMT);
- TEST_VQSHRUN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 3, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 4, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 2, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
@@ -101,9 +85,9 @@ FNNAME (INSN)
#undef CMT
#define CMT " (check cumulative saturation)"
- TEST_VQSHRUN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat_max_sh1, CMT);
- TEST_VQSHRUN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat_max_sh1, CMT);
- TEST_VQSHRUN_N(int, s, 64, 32, 2, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 1, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 1, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 1, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh1, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh1, CMT);
@@ -117,9 +101,9 @@ FNNAME (INSN)
#undef CMT
#define CMT ""
- TEST_VQSHRUN_N(int, s, 16, 8, 8, 6, expected_cumulative_sat, CMT);
- TEST_VQSHRUN_N(int, s, 32, 16, 4, 7, expected_cumulative_sat, CMT);
- TEST_VQSHRUN_N(int, s, 64, 32, 2, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 6, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 7, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 8, CMT);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqsub.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqsub.c
index 3068d4b..0efe8bc 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqsub.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqsub.c
@@ -46,24 +46,6 @@ VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffff79, 0xffffff7a,
VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffff68,
0xffffffffffffff69 };
-/* Expected values of cumulative saturation flag. */
-int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,8) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,4) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,32,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,8,16) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,16,8) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,32,4) = 0;
-int VECT_VAR(expected_cumulative_sat,uint,64,2) = 0;
-
/* 64-bit types, with 0 as second input. */
VECT_VAR_DECL(expected_64,int,64,1) [] = { 0xfffffffffffffff0 };
VECT_VAR_DECL(expected_64,uint,64,1) [] = { 0xfffffffffffffff0 };
@@ -71,11 +53,6 @@ VECT_VAR_DECL(expected_64,int,64,2) [] = { 0xfffffffffffffff0,
0xfffffffffffffff1 };
VECT_VAR_DECL(expected_64,uint,64,2) [] = { 0xfffffffffffffff0,
0xfffffffffffffff1 };
-int VECT_VAR(expected_cumulative_sat_64,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_64,uint,64,2) = 0;
-
/* 64-bit types, other cases. */
VECT_VAR_DECL(expected_64_2,int,64,1) [] = { 0xffffffffffffffac };
VECT_VAR_DECL(expected_64_2,uint,64,1) [] = { 0xffffffffffffff68 };
@@ -83,10 +60,6 @@ VECT_VAR_DECL(expected_64_2,int,64,2) [] = { 0xffffffffffffffac,
0xffffffffffffffad };
VECT_VAR_DECL(expected_64_2,uint,64,2) [] = { 0xffffffffffffff68,
0xffffffffffffff69 };
-int VECT_VAR(expected_cumulative_sat_64_2,int,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64_2,uint,64,1) = 0;
-int VECT_VAR(expected_cumulative_sat_64_2,int,64,2) = 0;
-int VECT_VAR(expected_cumulative_sat_64_2,uint,64,2) = 0;
/* 64-bit types, all causing cumulative saturation. */
VECT_VAR_DECL(expected_64_3,int,64,1) [] = { 0x8000000000000000 };
@@ -94,10 +67,6 @@ VECT_VAR_DECL(expected_64_3,uint,64,1) [] = { 0x0 };
VECT_VAR_DECL(expected_64_3,int,64,2) [] = { 0x7fffffffffffffff,
0x7fffffffffffffff };
VECT_VAR_DECL(expected_64_3,uint,64,2) [] = { 0x0, 0x0 };
-int VECT_VAR(expected_cumulative_sat_64_3,int,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,uint,64,1) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,int,64,2) = 1;
-int VECT_VAR(expected_cumulative_sat_64_3,uint,64,2) = 1;
/* smaller types, corner cases causing cumulative saturation. (1) */
VECT_VAR_DECL(expected_lt_64_1,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
@@ -115,13 +84,6 @@ VECT_VAR_DECL(expected_lt_64_1,int,16,8) [] = { 0x8000, 0x8000,
0x8000, 0x8000 };
VECT_VAR_DECL(expected_lt_64_1,int,32,4) [] = { 0x80000000, 0x80000000,
0x80000000, 0x80000000 };
-int VECT_VAR(expected_csat_lt_64_1,int,8,8) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,16,4) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,32,2) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,8,16) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,16,8) = 1;
-int VECT_VAR(expected_csat_lt_64_1,int,32,4) = 1;
-
/* smaller types, corner cases causing cumulative saturation. (2) */
VECT_VAR_DECL(expected_lt_64_2,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
@@ -134,12 +96,6 @@ VECT_VAR_DECL(expected_lt_64_2,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
VECT_VAR_DECL(expected_lt_64_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0 };
VECT_VAR_DECL(expected_lt_64_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
-int VECT_VAR(expected_csat_lt_64_2,uint,8,8) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,16,4) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,32,2) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,8,16) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,16,8) = 1;
-int VECT_VAR(expected_csat_lt_64_2,uint,32,4) = 1;
void vqsub_extras(void)
{
@@ -157,10 +113,10 @@ void vqsub_extras(void)
VDUP(vector2, q, uint, u, 64, 2, 0x0);
#define MSG "64 bits saturation when adding zero"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64, MSG);
@@ -175,10 +131,10 @@ void vqsub_extras(void)
#undef MSG
#define MSG "64 bits saturation cumulative_sat (2)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64_2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64_2, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64_2, MSG);
@@ -197,10 +153,10 @@ void vqsub_extras(void)
#undef MSG
#define MSG "64 bits saturation cumulative_sat (3)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, expected_cumulative_sat_64_3, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, expected_cumulative_sat_64_3, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 64, 1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 64, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 64, 2, MSG);
CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64_3, MSG);
CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_64_3, MSG);
@@ -218,12 +174,12 @@ void vqsub_extras(void)
#undef MSG
#define MSG "less than 64 bits saturation cumulative_sat (1)"
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, expected_csat_lt_64_1, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, expected_csat_lt_64_1, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 8, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 16, 4, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , int, s, 32, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 8, 16, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 16, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, int, s, 32, 4, MSG);
CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_lt_64_1, MSG);
CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_lt_64_1, MSG);
@@ -250,12 +206,12 @@ void vqsub_extras(void)
#undef MSG
#define MSG "less than 64 bits saturation cumulative_sat (2)"
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, expected_csat_lt_64_2, MSG);
- TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, expected_csat_lt_64_2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 8, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 16, 4, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, , uint, u, 32, 2, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 8, 16, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 16, 8, MSG);
+ TEST_BINARY_SAT_OP(INSN_NAME, q, uint, u, 32, 4, MSG);
CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_lt_64_2, MSG);
CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_lt_64_2, MSG);