author     Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2023-06-06 23:42:48 +0100
committer  Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2023-06-16 13:52:23 +0100
commit     207db5d92f9cc533627c6bd5b3ebae9128b49741
tree       522b077913e87a42d5baafd7c33265bc7111ba08 /gcc/config
parent     d20b2ad845876eec0ee80a3933ad49f9f6c4ee30
aarch64: Add ASHIFTRT handling for shrn pattern
The first patch in the series has some fallout in the testsuite, particularly
gcc.target/aarch64/shrn-combine-2.c.  Our previous patterns for SHRN matched
both (truncate (ashiftrt (x) (N))) and (truncate (lshiftrt (x) (N))), as these
are equivalent for the shift amounts involved.  In our refactoring, however,
we mapped shrn to truncate+lshiftrt.

The fix here is to iterate over ashiftrt,lshiftrt in the pattern for it.
However, we don't want to allow ashiftrt for us_truncate or lshiftrt for
ss_truncate from the ALL_TRUNC iterator.  This patch adds an
AARCH64_VALID_SHRN_OP helper to gate the valid combinations of truncations
and shifts.

Bootstrapped and tested on aarch64-none-linux-gnu and aarch64_be-none-elf.

gcc/ChangeLog:

	* config/aarch64/aarch64.h (AARCH64_VALID_SHRN_OP): Define.
	* config/aarch64/aarch64-simd.md
	(*aarch64_<shrn_op>shrn_n<mode>_insn<vczle><vczbe>): Rename to...
	(*aarch64_<shrn_op><shrn_s>shrn_n<mode>_insn<vczle><vczbe>): ... This.
	Use SHIFTRT iterator and add AARCH64_VALID_SHRN_OP to condition.
	* config/aarch64/iterators.md (shrn_s): New code attribute.
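To make the equivalence concrete, here is a hand-written sketch in the spirit
of shrn-combine-2.c (not the testsuite's actual contents).  Both loops narrow
32-bit elements to 16 bits after a 16-bit right shift, so the vectoriser
should emit a single SHRN per vector whether the shift is logical (unsigned
source) or arithmetic (signed source), because the shift discards exactly the
bits that the truncation drops.

/* Hand-written illustration, not the actual test file.  */
void
narrow_u (unsigned short *restrict d, const unsigned int *restrict s, int n)
{
  for (int i = 0; i < n; i++)
    d[i] = s[i] >> 16;	/* (truncate (lshiftrt ...))  */
}

void
narrow_s (short *restrict d, const int *restrict s, int n)
{
  for (int i = 0; i < n; i++)
    d[i] = s[i] >> 16;	/* (truncate (ashiftrt ...))  */
}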
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md  |  6 +++---
-rw-r--r--  gcc/config/aarch64/aarch64.h        |  5 +++++
-rw-r--r--  gcc/config/aarch64/iterators.md     |  2 ++
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index bbb5434..ce5885e 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6665,13 +6665,13 @@
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
-(define_insn "*aarch64_<shrn_op>shrn_n<mode>_insn<vczle><vczbe>"
+(define_insn "*aarch64_<shrn_op><shrn_s>shrn_n<mode>_insn<vczle><vczbe>"
[(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
(ALL_TRUNC:<VNARROWQ>
- (<TRUNC_SHIFT>:VQN
+ (SHIFTRT:VQN
(match_operand:VQN 1 "register_operand" "w")
(match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))))]
- "TARGET_SIMD"
+ "TARGET_SIMD && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
"<shrn_op>shrn\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
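For orientation, a hand-written sketch (not compiler output) of the two RTL
shapes the renamed pattern can now match for a V4SI-to-V4HI narrowing with a
plain TRUNCATE:

;; (set (reg:V4HI d)
;;      (truncate:V4HI
;;        (lshiftrt:V4SI (reg:V4SI s)
;;                       (const_vector:V4SI [16 16 16 16]))))
;; ... and likewise with ashiftrt in place of lshiftrt.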
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 801f9eb..a01f1ee 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -1297,4 +1297,9 @@ extern poly_uint16 aarch64_sve_vg;
#define REG_ALLOC_ORDER {}
#define ADJUST_REG_ALLOC_ORDER aarch64_adjust_reg_alloc_order ()
+#define AARCH64_VALID_SHRN_OP(T,S) \
+((T) == TRUNCATE \
+ || ((T) == US_TRUNCATE && (S) == LSHIFTRT) \
+ || ((T) == SS_TRUNCATE && (S) == ASHIFTRT))
+
#endif /* GCC_AARCH64_H */
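A minimal standalone sketch of the new gate's truth table.  The rtx_code enum
below is a stand-in for GCC's real one from rtl.def (only the equality tests
matter here); the macro body is copied from the hunk above.

#include <assert.h>

enum rtx_code { TRUNCATE, SS_TRUNCATE, US_TRUNCATE, ASHIFTRT, LSHIFTRT };

#define AARCH64_VALID_SHRN_OP(T,S) \
((T) == TRUNCATE \
 || ((T) == US_TRUNCATE && (S) == LSHIFTRT) \
 || ((T) == SS_TRUNCATE && (S) == ASHIFTRT))

int
main (void)
{
  /* Plain truncation pairs with either shift (SHRN).  */
  assert (AARCH64_VALID_SHRN_OP (TRUNCATE, ASHIFTRT));
  assert (AARCH64_VALID_SHRN_OP (TRUNCATE, LSHIFTRT));
  /* Signed saturating truncation needs an arithmetic shift (SQSHRN).  */
  assert (AARCH64_VALID_SHRN_OP (SS_TRUNCATE, ASHIFTRT));
  assert (!AARCH64_VALID_SHRN_OP (SS_TRUNCATE, LSHIFTRT));
  /* Unsigned saturating truncation needs a logical shift (UQSHRN).  */
  assert (AARCH64_VALID_SHRN_OP (US_TRUNCATE, LSHIFTRT));
  assert (!AARCH64_VALID_SHRN_OP (US_TRUNCATE, ASHIFTRT));
  return 0;
}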
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index acc7a3e..15436c8 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -2398,6 +2398,8 @@
;; op prefix for shift right and narrow.
(define_code_attr srn_op [(ashiftrt "r") (lshiftrt "")])
+(define_code_attr shrn_s [(ashiftrt "s") (lshiftrt "")])
+
;; Map shift operators onto underlying bit-field instructions
(define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
(lshiftrt "ubfx") (rotatert "extr")])
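Assuming the existing shrn_op code attribute maps truncate, ss_truncate and
us_truncate to "", "sq" and "uq" respectively (as the emitted mnemonics
suggest), the combinations accepted by AARCH64_VALID_SHRN_OP expand the
renamed pattern to the following names (reconstructed by hand, not
machine-expanded):

;; truncate    + lshiftrt  ->  *aarch64_shrn_n<mode>_insn     (SHRN)
;; truncate    + ashiftrt  ->  *aarch64_sshrn_n<mode>_insn    (SHRN)
;; ss_truncate + ashiftrt  ->  *aarch64_sqsshrn_n<mode>_insn  (SQSHRN)
;; us_truncate + lshiftrt  ->  *aarch64_uqshrn_n<mode>_insn   (UQSHRN)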