author     Jin Ma <jinma@linux.alibaba.com>        2025-04-02 13:37:07 -0600
committer  Jeff Law <jlaw@ventanamicro.com>        2025-04-02 13:37:07 -0600
commit     dd6ebc0a3473a830115995bdcaf8f797ebd085a3 (patch)
tree       09d3616a947cdf2790de1ec753f8d5ccb24883bb /gcc
parent     3b00f8b56c1b92356da81c0d80ab40da2075d536 (diff)
[PATCH v2] RISC-V: Fix bug when combining slli + addw + zext.w into sh[123]add + zext.w
Assume the following variables:

  unsigned long long a0, a1;
  unsigned int a2;

and the expressions:

  a0 = (a0 << 50) >> 49;   // slli a0, a0, 50 + srli a0, a0, 49
  a2 = a1 + a0;            // addw a2, a1, a0 + slli a2, a2, 32 + srli a2, a2, 32

During the ZBA optimization in the combine pass, this was rewritten to:

  a2 = (a0 << 1) + a1;     // sh1add a2, a0, a1 + zext.w a2, a2

This is incorrect: it overlooks the fact that the first statement is
equivalent to a0 = (a0 << 1) & 0x7ffe, i.e. all bits of a0 above bit 14
are cleared, so the mask cannot simply be dropped from the 32-bit
addition.

gcc/ChangeLog:

	* config/riscv/bitmanip.md: Apply the optimization only when bit 31
	of operands[3] (the mask) is set.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/zba-shNadd-09.c: New test.
	* gcc.target/riscv/zba-shNadd-10.c: New test.
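To make the mismatch concrete, here is a minimal standalone driver (not part of
the patch; the values of a0 and a1 are chosen purely for illustration) comparing
the masked addition the source asks for with the unmasked one the old split
produced:

  #include <stdio.h>

  int main (void)
  {
    unsigned long long a0 = 0x8000;  /* bit 15 set, discarded by the slli/srli pair */
    unsigned long long a1 = 0xffff;

    unsigned long long masked = (a0 << 50) >> 49;   /* == (a0 << 1) & 0x7ffe */
    unsigned int addw_result   = (unsigned int) (a1 + masked);     /* what the addw sequence computes */
    unsigned int sh1add_result = (unsigned int) ((a0 << 1) + a1);  /* mask dropped, as in the bad rewrite */

    /* Prints 0xffff and 0x1ffff: the two forms differ as soon as a0 has
       bits above bit 14 set.  */
    printf ("%#x %#x\n", addw_result, sh1add_result);
    return 0;
  }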
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/bitmanip.md                     4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c  12
-rw-r--r--  gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c  21
3 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/gcc/config/riscv/bitmanip.md b/gcc/config/riscv/bitmanip.md
index b29c127..5ed5e18 100644
--- a/gcc/config/riscv/bitmanip.md
+++ b/gcc/config/riscv/bitmanip.md
@@ -80,7 +80,9 @@
(match_operand:DI 3 "consecutive_bits_operand")) 0)
(subreg:SI (match_operand:DI 4 "register_operand") 0))))]
"TARGET_64BIT && TARGET_ZBA
- && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))"
+ && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))
+ /* Ensure the mask includes all the bits in SImode. */
+ && ((INTVAL (operands[3]) & (HOST_WIDE_INT_1U << 31)) != 0)"
[(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 4)))
(set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])
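To see what the strengthened condition accepts, here is a small host-side
sketch (not GCC code; shamt_matches_mask_p only models riscv_shamt_matches_mask_p
under the assumption that it compares the shift amount against the mask's
trailing-zero count): the problematic mask 0x7ffe from the example above is now
rejected, while a mask covering all of SImode above the shift, such as
0xfffffffe, still qualifies.

  #include <stdbool.h>
  #include <stdio.h>

  /* Host-side model of the split's guard.  shamt_matches_mask_p is only a
     stand-in for riscv_shamt_matches_mask_p, assumed here to check that the
     mask's lowest set bit sits exactly at the shift amount.  */
  static bool shamt_matches_mask_p (int shamt, unsigned long long mask)
  {
    return mask != 0 && shamt == __builtin_ctzll (mask);
  }

  static bool split_ok (int shamt, unsigned long long mask)
  {
    return shamt_matches_mask_p (shamt, mask)
	   /* New condition from the patch: the mask must also cover bit 31,
	      i.e. every SImode bit above the shift amount.  */
	   && (mask & (1ULL << 31)) != 0;
  }

  int main (void)
  {
    printf ("0x7ffe:     %d\n", split_ok (1, 0x7ffeULL));      /* 0: now rejected   */
    printf ("0xfffffffe: %d\n", split_ok (1, 0xfffffffeULL));  /* 1: still accepted */
    return 0;
  }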
diff --git a/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c b/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c
new file mode 100644
index 0000000..303f3cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zba -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" } } */
+
+long long sub (unsigned long long a, unsigned long long b)
+{
+ b = (b << 50) >> 49;
+ unsigned int x = a + b;
+ return x;
+}
+
+/* { dg-final { scan-assembler-not {\msh1add} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c b/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c
new file mode 100644
index 0000000..883cce2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c
@@ -0,0 +1,21 @@
+/* { dg-do run { target { rv64 } } } */
+/* { dg-options "-march=rv64gc_zba -mabi=lp64d -O2" } */
+
+struct {
+ unsigned a : 14;
+ unsigned b : 3;
+} c;
+
+unsigned long long d;
+void e (unsigned long long *f, long p2) { *f = p2; }
+signed g;
+long i;
+
+int main () {
+ c.b = 4;
+ i = -(-c.a - (3023282U + c.a + g));
+ e (&d, i);
+ if (d != 3023282)
+ __builtin_abort ();
+ __builtin_exit (0);
+}
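For reference, the expected value in the run test can be worked out by hand:
with c.a and g both zero-initialized, the expression assigned to i reduces to
-(-0 - (3023282U + 0 + 0)) = -(0u - 3023282u), which in 32-bit unsigned
arithmetic is 3023282, so a correct compilation stores 3023282 into d and the
__builtin_abort call is reached only if the over-eager sh[123]add rewrite
changes that intermediate result.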