author		Philipp Tomsich <philipp.tomsich@vrull.eu>	2022-09-14 14:16:27 +0200
committer	Philipp Tomsich <philipp.tomsich@vrull.eu>	2022-11-18 21:15:24 +0100
commit		787ac95917a666b3d186e2d5afec07ee5b75c6df (patch)
tree		226dc225e6f03496f02c9783675e857eb0f0bb5f /gcc
parent		30c2d8df173a6f3ca145cda9f9e261616fca8467 (diff)
RISC-V: Optimize slli(.uw)? + addw + zext.w into sh[123]add + zext.w
gcc/ChangeLog:

	* config/riscv/bitmanip.md: Handle corner-cases for combine
	when chaining slli(.uw)? + addw.
	* config/riscv/riscv-protos.h (riscv_shamt_matches_mask_p):
	Define prototype.
	* config/riscv/riscv.cc (riscv_shamt_matches_mask_p): Helper
	for evaluating the relationship between two operands.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/zba-shNadd-04.c: New test.
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/config/riscv/bitmanip.md			| 49
-rw-r--r--	gcc/config/riscv/riscv-protos.h			|  1
-rw-r--r--	gcc/config/riscv/riscv.cc			|  9
-rw-r--r--	gcc/testsuite/gcc.target/riscv/zba-shNadd-04.c	| 23
4 files changed, 82 insertions, 0 deletions
diff --git a/gcc/config/riscv/bitmanip.md b/gcc/config/riscv/bitmanip.md
index 73881a9..2f89fd6 100644
--- a/gcc/config/riscv/bitmanip.md
+++ b/gcc/config/riscv/bitmanip.md
@@ -56,6 +56,55 @@
[(set (match_dup 5) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
(set (match_dup 0) (sign_extend:DI (div:SI (subreg:SI (match_dup 5) 0) (subreg:SI (match_dup 4) 0))))])
+; Zba does not provide W-forms of sh[123]add(.uw)?, which leads to an
+; interesting irregularity: we can generate a signed 32-bit result
+; using slli(.uw)? + addw, but an unsigned 32-bit result can be
+; generated more efficiently as sh[123]add + zext.w (the .uw can be
+; dropped if we zero-extend the output anyway).
+;
+; To enable this optimization, we split [ slli(.uw)?, addw, zext.w ]
+; into [ sh[123]add, zext.w ] for use during combine.
+(define_split
+ [(set (match_operand:DI 0 "register_operand")
+ (zero_extend:DI (plus:SI (ashift:SI (subreg:SI (match_operand:DI 1 "register_operand") 0)
+ (match_operand:QI 2 "imm123_operand"))
+ (subreg:SI (match_operand:DI 3 "register_operand") 0))))]
+ "TARGET_64BIT && TARGET_ZBA"
+ [(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand")
+ (zero_extend:DI (plus:SI (subreg:SI (and:DI (ashift:DI (match_operand:DI 1 "register_operand")
+ (match_operand:QI 2 "imm123_operand"))
+ (match_operand:DI 3 "consecutive_bits_operand")) 0)
+ (subreg:SI (match_operand:DI 4 "register_operand") 0))))]
+ "TARGET_64BIT && TARGET_ZBA
+ && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))"
+ [(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 4)))
+ (set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])
+
+; Make sure that an andi followed by a sh[123]add remains a two-instruction
+; sequence and is not torn apart into slli, srli, add.
+(define_insn_and_split "*andi_add.uw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:QI 2 "imm123_operand" "Ds3"))
+ (match_operand:DI 3 "consecutive_bits_operand" ""))
+ (match_operand:DI 4 "register_operand" "r")))
+ (clobber (match_scratch:DI 5 "=&r"))]
+ "TARGET_64BIT && TARGET_ZBA
+ && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))
+ && SMALL_OPERAND (INTVAL (operands[3]) >> INTVAL (operands[2]))"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 5) (and:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:DI (ashift:DI (match_dup 5) (match_dup 2))
+ (match_dup 4)))]
+{
+ operands[3] = GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2]));
+})
+
(define_insn "*shNadduw"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI
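
To make the intent of the new patterns concrete, here are two illustrative functions in the style of the new testcase (function names, the shift amount of 2, and the expected instruction pairings are illustrative assumptions based on the comments above, not verified compiler output; assume -march=rv64gc_zba at -O2):

	/* First split: a zero-extended 32-bit shift-add.  Previously emitted
	   as slli + addw + zext.w; expected to combine to sh2add + zext.w.  */
	long long shadd_zext (unsigned long long a, unsigned long long b)
	{
	  return (unsigned int) (a + (b << 2));
	}

	/* *andi_add.uw: the mask's trailing zeros match the shift amount
	   (ctz (0x7fc) == 2, and 0x7fc >> 2 == 0x1ff fits an andi immediate),
	   so this should stay a two-instruction andi + sh2add sequence
	   rather than being torn apart into slli, srli, add.  */
	long long andi_shadd (unsigned long long a, unsigned long long b)
	{
	  return a + ((b << 2) & 0x7fc);
	}
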
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 5a718bb..2ec3af0 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -77,6 +77,7 @@ extern bool riscv_gpr_save_operation_p (rtx);
extern void riscv_reinit (void);
extern poly_uint64 riscv_regmode_natural_size (machine_mode);
extern bool riscv_v_ext_vector_mode_p (machine_mode);
+extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
/* Routines implemented in riscv-c.cc. */
void riscv_cpu_cpp_builtins (cpp_reader *);
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index d459851..7ec4ce9 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -6772,6 +6772,15 @@ riscv_dwarf_poly_indeterminate_value (unsigned int i, unsigned int *factor,
return RISCV_DWARF_VLENB;
}
+/* Return true if the shift amount SHAMT matches the count of trailing
+   cleared bits in the bitmask MASK.  */
+
+bool
+riscv_shamt_matches_mask_p (int shamt, HOST_WIDE_INT mask)
+{
+ return shamt == ctz_hwi (mask);
+}
+
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
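
For intuition, a minimal standalone sketch of the same check (hypothetical code, not from the patch; ctz_hwi is GCC's trailing-zero count for HOST_WIDE_INT, approximated here with __builtin_ctzll, which requires a nonzero argument):

	#include <stdbool.h>

	/* Hypothetical stand-in for riscv_shamt_matches_mask_p: true iff the
	   mask's trailing cleared bits line up exactly with the shift amount,
	   so that (x << shamt) & mask == ((x & (mask >> shamt)) << shamt),
	   the identity the splits above rely on.  */
	static bool
	shamt_matches_mask_p (int shamt, long long mask)
	{
	  return shamt == __builtin_ctzll (mask);	/* mask must be nonzero */
	}

	/* shamt_matches_mask_p (2, 0x7fc) -> true  (ctz == 2)
	   shamt_matches_mask_p (2, 0x7f8) -> false (ctz == 3)  */
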
diff --git a/gcc/testsuite/gcc.target/riscv/zba-shNadd-04.c b/gcc/testsuite/gcc.target/riscv/zba-shNadd-04.c
new file mode 100644
index 0000000..abed149
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zba-shNadd-04.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zba -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" } } */
+
+long long sub1(unsigned long long a, unsigned long long b)
+{
+ b = (b << 32) >> 31;
+ unsigned int x = a + b;
+ return x;
+}
+
+long long sub2(unsigned long long a, unsigned long long b)
+{
+ return (unsigned int)(a + (b << 1));
+}
+
+long long sub3(unsigned long long a, unsigned long long b)
+{
+ return (a + (b << 1)) & ~0u;
+}
+
+/* { dg-final { scan-assembler-times "sh1add" 3 } } */
+/* { dg-final { scan-assembler-times "zext.w\t" 3 } } */
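
To exercise just this test from a GCC build tree, the usual DejaGnu invocation should work (a configured riscv64 build or suitable target board is assumed):

	make check-gcc RUNTESTFLAGS="riscv.exp=zba-shNadd-04.c"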