author     Jeff Law <jlaw@ventanamicro.com>  2024-05-14 18:17:59 -0600
committer  Jeff Law <jlaw@ventanamicro.com>  2024-05-14 18:18:48 -0600
commit     32ff344d57d56fddb66c4976b5651345d40b7157 (patch)
tree       18ec440e6b39e5aaafdd3063437f42845d62253f /gcc
parent     3700bd68d1b01f0fe6d15f8a40b7fdca0904d5aa (diff)
[to-be-committed,RISC-V] Remove redundant AND in shift-add sequence
So this patch allows us to eliminate a redundant AND in some shift-add style
sequences.  I think the testcase was reduced from xz by the RAU team, but I'm
not highly confident of that.

Specifically the AND is masking off the upper 32 bits of the un-shifted value
and there's an outer SIGN_EXTEND from SI to DI.  However, in the RTL it's
working on the post-shifted value, so the constant is left shifted and we
have to account for that in the pattern's condition.

We can just drop the AND in this case.  So instead we do a 64-bit shift, then
a sign-extending ADD utilizing the low part of that 64-bit shift result.

This has run through Ventana's CI as well as my own.  I'll wait for it to run
through the larger CI system before pushing.

Jeff

gcc/
	* config/riscv/riscv.md: Add pattern for sign extended shift-add
	sequence with a masked input.

gcc/testsuite/
	* gcc.target/riscv/shift-add-2.c: New test.
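[Editorial note: to make the redundancy argument concrete, here is a small
standalone C check.  It is illustrative only and not part of the patch; it
models the matched RTL and the simplified form, and asserts they agree
whenever the pattern's condition on the mask holds.  The sample mask value
assumes combine has already truncated the left-shifted constant to its
significant low 32 bits, as described above.]

#include <assert.h>
#include <stdint.h>

/* What the matched RTL computes: DI shift, DI mask, take the low 32 bits,
   add in SImode, then sign-extend the sum back to DI.  */
static int64_t
with_and (int64_t op1, unsigned shamt, uint64_t mask, int32_t op4)
{
  uint64_t shifted = (uint64_t) op1 << shamt;
  uint64_t masked = shifted & mask;
  return (int64_t) (int32_t) ((uint32_t) masked + (uint32_t) op4);
}

/* What the split emits: the same computation with the AND dropped.  */
static int64_t
without_and (int64_t op1, unsigned shamt, int32_t op4)
{
  uint64_t shifted = (uint64_t) op1 << shamt;
  return (int64_t) (int32_t) ((uint32_t) shifted + (uint32_t) op4);
}

int
main (void)
{
  unsigned shamt = 3;
  uint64_t mask = 0xfffffff8;   /* 0xffffffff << 3, truncated to 32 bits.  */

  /* The pattern's condition: every bit the AND clears is either already
     zero after the shift or overwritten by the final sign-extension.  */
  assert ((mask | ((1u << shamt) - 1)) == 0xffffffff);

  for (int64_t x = -100000; x < 100000; x++)
    assert (with_and (x, shamt, mask, 0x1234) == without_and (x, shamt, 0x1234));
  return 0;
}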
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/riscv.md                     25
-rw-r--r--  gcc/testsuite/gcc.target/riscv/shift-add-2.c  16
2 files changed, 41 insertions(+), 0 deletions(-)
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 893040f..ee15c63 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -4120,6 +4120,31 @@
[(set_attr "type" "load")
(set (attr "length") (const_int 8))])
+;; The AND is redundant here.  It always turns off the high 32 bits and the
+;; low number of bits equal to the shift count. Those upper 32 bits will be
+;; reset by the SIGN_EXTEND at the end.
+;;
+;; One could argue combine should have realized this and simplified what it
+;; presented to the backend. But we can obviously cope with what it gave us.
+(define_insn_and_split ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (plus:SI (subreg:SI
+ (and:DI
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))
+ (match_operand 3 "const_int_operand" "n")) 0)
+ (match_operand:SI 4 "register_operand" "r"))))
+ (clobber (match_scratch:DI 5 "=&r"))]
+ "TARGET_64BIT
+ && (INTVAL (operands[3]) | ((1 << INTVAL (operands[2])) - 1)) == 0xffffffff"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 5) (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 6) (match_dup 4))))]
+ "{ operands[6] = gen_lowpart (SImode, operands[5]); }"
+ [(set_attr "type" "arith")])
+
(include "bitmanip.md")
(include "crypto.md")
(include "sync.md")
diff --git a/gcc/testsuite/gcc.target/riscv/shift-add-2.c b/gcc/testsuite/gcc.target/riscv/shift-add-2.c
new file mode 100644
index 0000000..8743985
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/shift-add-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zba_zbb_zbs -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" } } */
+
+int sub2(int a, long long b) {
+ b = (b << 32) >> 31;
+ unsigned int x = a + b;
+ return x;
+}
+
+
+/* { dg-final { scan-assembler-times "\tslli\t" 1 } } */
+/* { dg-final { scan-assembler-times "\taddw\t" 1 } } */
+/* { dg-final { scan-assembler-not "\tsrai\t" } } */
+/* { dg-final { scan-assembler-not "\tsh.add\t" } } */
+
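[Editorial note: a word on what this test exercises.  (b << 32) >> 31 is the
low 32 bits of b, sign-extended and doubled, so combine should present the
new pattern with a shift count of 1 and, after truncation, a mask of
0xfffffffe.  The pattern then splits to exactly one slli and one addw, which
is what the scan-assembler directives check; without the patch one would
presumably see an srai from the explicit shift pair, or an sh1add from zba,
instead.  A quick sanity check of the pattern's condition for these
constants, illustrative only and not part of the patch:]

#include <assert.h>

int
main (void)
{
  unsigned shamt = 1;              /* (b << 32) >> 31 == low32(b) << 1.  */
  unsigned long mask = 0xfffffffe; /* 0xffffffff << 1, truncated to 32 bits.  */
  assert ((mask | ((1u << shamt) - 1)) == 0xffffffff);
  return 0;
}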