author     Andre Vieira (lists) <andre.simoesdiasvieira@arm.com>	2024-04-11 17:54:37 +0100
committer  Richard Sandiford <richard.sandiford@arm.com>	2024-04-11 17:54:37 +0100
commit     b87ba79200f2a727aa5c523abcc5c03fa11fc007 (patch)
tree       7c580b13b385e93e5587ef3225bef50681bbb6b8
parent     eec220142b95d77277238b30f4e08d41ba969e1b (diff)
aarch64: Fix _BitInt testcases
This patch fixes some testisms introduced by:

    commit 5aa3fec38cc6f52285168b161bab1a869d864b44
    Author: Andre Vieira <andre.simoesdiasvieira@arm.com>
    Date:   Wed Apr 10 16:29:46 2024 +0100

        aarch64: Add support for _BitInt

The testcases were relying on an unnecessary sign-extend that is no longer
generated.  The tested version was just slightly behind top of trunk when the
patch was committed, and the codegen had changed, for the better, by then.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/bitfield-bitint-abi-align16.c (g1, g8, g16,
	g1p, g8p, g16p): Remove unnecessary sbfx.
	* gcc.target/aarch64/bitfield-bitint-abi-align8.c (g1, g8, g16,
	g1p, g8p, g16p): Likewise.
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c  30
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c   30
2 files changed, 24 insertions(+), 36 deletions(-)
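For readers wondering why the removed sbfx was a no-op in these patterns: sbfx xD, xN, #0, #63 sign-extends the low 63 bits of a register, but the only consumers that followed it are an AND with 9223372036854775807 (0x7fffffffffffffff, i.e. the low 63 bits) and an AND with 1, neither of which can observe bit 63. A minimal, self-contained C sketch of that argument (illustrative only, not taken from the testcase sources):

#include <assert.h>
#include <stdint.h>

#define MASK63 0x7fffffffffffffffULL

/* Model of "sbfx xD, xN, #0, #63": keep bits [0,62] and copy bit 62
   into bit 63 (sign extension of a 63-bit field).  */
static uint64_t
sbfx_0_63 (uint64_t x)
{
  return (x & (1ULL << 62)) ? (x | ~MASK63) : (x & MASK63);
}

int
main (void)
{
  uint64_t samples[] = { 0, 1, 0x4000000000000000ULL,
			 0x7fffffffffffffffULL, 0xffffffffffffffffULL };
  for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
    {
      uint64_t x = samples[i];
      uint64_t s = sbfx_0_63 (x);
      /* The two consumers in the patterns, "and xD, xS, 9223372036854775807"
	 and "and xD, xS, 1", give the same answer whether xS holds the
	 sign-extended copy or the original register.  */
      assert ((s & MASK63) == (x & MASK63));
      assert ((s & 1) == (x & 1));
    }
  return 0;
}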
diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
index 3f292a4..4a228b0 100644
--- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
+++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
@@ -55,9 +55,8 @@
** g1:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f1
*/
@@ -66,9 +65,8 @@
** g8:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f8
*/
@@ -76,9 +74,8 @@
** g16:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f16
*/
@@ -107,9 +104,8 @@
/*
** g1p:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f1p
@@ -117,9 +113,8 @@
/*
** g8p:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f8p
@@ -128,9 +123,8 @@
** g16p:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f16p
*/
diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
index da3c235..e7f7736 100644
--- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
+++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
@@ -54,9 +54,8 @@
/*
** g1:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f1
@@ -65,9 +64,8 @@
/*
** g8:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f8
@@ -76,9 +74,8 @@
** g16:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f16
*/
@@ -107,9 +104,8 @@
/*
** g1p:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f1p
@@ -117,9 +113,8 @@
/*
** g8p:
** mov (w[0-9]+), w1
-** sbfx (x[0-9]+), x0, 0, 63
-** and x3, \2, 9223372036854775807
-** and x1, \2, 1
+** and x3, x0, 9223372036854775807
+** and x1, x0, 1
** mov x2, 0
** mov w0, \1
** b f8p
@@ -128,9 +123,8 @@
** g16p:
** mov (x[0-9]+), x0
** mov w0, w1
-** sbfx (x[0-9]+), \1, 0, 63
-** and x4, \2, 9223372036854775807
-** and x2, \2, 1
+** and x4, \1, 9223372036854775807
+** and x2, \1, 1
** mov x3, 0
** b f16p
*/
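A note on the backreference renumbering visible in every hunk: the ** blocks are check-function-bodies patterns, and the body between the markers is turned into a regular expression, so capture groups are numbered by their order of appearance. The deleted sbfx line introduced the second group, which is why the surviving and lines now refer to \1, the register captured by the leading mov. An annotated copy of the new g16p body follows; the "<-" remarks are added here for explanation and are not part of the pattern:

/*
** g16p:
** 	mov	(x[0-9]+), x0			<- defines capture group \1
** 	mov	w0, w1
** 	and	x4, \1, 9223372036854775807	<- uses \1 directly; the deleted
** 	and	x2, \1, 1			   sbfx was the old group \2
** 	mov	x3, 0
** 	b	f16p
*/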