author    Kyrylo Tkachov <kyrylo.tkachov@arm.com>    2023-06-07 15:24:46 +0100
committer Kyrylo Tkachov <kyrylo.tkachov@arm.com>    2023-06-16 13:52:24 +0100
commit    b5ecca346f3aead5755f4b5df10d7d8c0cabd773
tree      9d338490977fd3ec379471b19ec2012b1cf40364
parent    c8e9a5ced0dbe4fef6c1cefee773895a662ba868
aarch64: Handle ASHIFTRT in patterns for shrn2
As with the low-half patterns, we want to match both ashiftrt and
lshiftrt with the truncate for SHRN2.  We reuse the SHIFTRT iterator
and the AARCH64_VALID_SHRN_OP check to do so, but because the high-half
patterns are expanded through their gen_* names we need to disambiguate
each trunc+shift combination in the pattern name, which leads to a
slight renaming of the builtins.  The AARCH64_VALID_SHRN_OP check on
the expander and the define_insns ensures that no invalid combination
is matched.
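To see why a plain truncate can combine with either shift, note that
for any valid SHRN shift count the truncation discards every bit on
which the two shifts differ.  A minimal standalone C sketch of that
argument (illustrative only, not part of the patch; the sign-sensitive
saturating cases are presumably what AARCH64_VALID_SHRN_OP restricts):

  #include <stdint.h>

  /* For shift counts 1..8 the low 8 bits of (x >> n) are bits n..n+7
     of x, all of which lie inside the original 16 data bits, so an
     arithmetic and a logical right shift agree after truncation.  */
  uint8_t
  narrow_after_shift (int16_t x, unsigned n)
  {
    uint8_t ashift = (uint8_t) (x >> n);             /* arithmetic shift */
    uint8_t lshift = (uint8_t) ((uint16_t) x >> n);  /* logical shift */
    return ashift == lshift ? ashift : 0;            /* equal for n in 1..8 */
  }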
Bootstrapped and tested on aarch64-none-linux-gnu and
aarch64_be-none-elf.
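For example, a narrowing-high intrinsic from arm_neon.h now goes
through the renamed ushrn2 builtin (the function name and the expected
codegen below are illustrative):

  #include <arm_neon.h>

  /* Shift each 16-bit lane of b right by 5, truncate to 8 bits and
     place the results above a in the high half of the result.  This
     should assemble to a single "shrn2 v0.16b, v1.8h, #5" (exact
     registers will vary).  */
  int8x16_t
  shrn2_high_example (int8x8_t a, int16x8_t b)
  {
    return vshrn_high_n_s16 (a, b, 5);
  }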
gcc/ChangeLog:
* config/aarch64/aarch64-simd-builtins.def (shrn2_n): Rename builtins to...
(ushrn2_n): ... This.
(sqshrn2_n): Rename builtins to...
(sqsshrn2_n): ... This.
(uqshrn2_n): Rename builtins to...
(uqushrn2_n): ... This.
* config/aarch64/arm_neon.h (vqshrn_high_n_s16): Adjust for the above.
(vqshrn_high_n_s32): Likewise.
(vqshrn_high_n_s64): Likewise.
(vqshrn_high_n_u16): Likewise.
(vqshrn_high_n_u32): Likewise.
(vqshrn_high_n_u64): Likewise.
(vshrn_high_n_s16): Likewise.
(vshrn_high_n_s32): Likewise.
(vshrn_high_n_s64): Likewise.
(vshrn_high_n_u16): Likewise.
(vshrn_high_n_u32): Likewise.
(vshrn_high_n_u64): Likewise.
* config/aarch64/aarch64-simd.md (aarch64_<shrn_op>shrn2_n<mode>_insn_le):
Rename to...
(aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le): ... This.
Use SHIFTRT iterator and AARCH64_VALID_SHRN_OP check.
(aarch64_<shrn_op>shrn2_n<mode>_insn_be): Rename to...
(aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be): ... This.
Use SHIFTRT iterator and AARCH64_VALID_SHRN_OP check.
(aarch64_<shrn_op>shrn2_n<mode>): Rename to...
(aarch64_<shrn_op><sra_op>shrn2_n<mode>): ... This.
Update expander for the above.
 gcc/config/aarch64/aarch64-simd-builtins.def |  8
 gcc/config/aarch64/aarch64-simd.md           | 28
 gcc/config/aarch64/arm_neon.h                | 24
 3 files changed, 31 insertions(+), 29 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 01cd85d..e2b94ad 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -263,8 +263,8 @@
   BUILTIN_VQN (SHIFTIMM, shrn_n, 0, NONE)
   BUILTIN_VQN (USHIFTIMM, shrn_n, 0, NONE)
-  BUILTIN_VQN (SHIFT2IMM, shrn2_n, 0, NONE)
-  BUILTIN_VQN (USHIFT2IMM, shrn2_n, 0, NONE)
+  BUILTIN_VQN (SHIFT2IMM, ushrn2_n, 0, NONE)
+  BUILTIN_VQN (USHIFT2IMM, ushrn2_n, 0, NONE)
   BUILTIN_VQN (SHIFTIMM, rshrn_n, 0, NONE)
   BUILTIN_VQN (USHIFTIMM, rshrn_n, 0, NONE)
@@ -480,8 +480,8 @@
   BUILTIN_SD_HSDI (USHIFTIMM, uqrshrn_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqshrun2_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqrshrun2_n, 0, NONE)
-  BUILTIN_VQN (SHIFT2IMM, sqshrn2_n, 0, NONE)
-  BUILTIN_VQN (USHIFT2IMM, uqshrn2_n, 0, NONE)
+  BUILTIN_VQN (SHIFT2IMM, sqsshrn2_n, 0, NONE)
+  BUILTIN_VQN (USHIFT2IMM, uqushrn2_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM, sqrshrn2_n, 0, NONE)
   BUILTIN_VQN (USHIFT2IMM, uqrshrn2_n, 0, NONE)
   /* Implemented by aarch64_<sur>s<lr>i_n<mode>.  */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b31c713..cd04cbd 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6773,49 +6773,51 @@
 }
 )

-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_le"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
 	  (ALL_TRUNC:<VNARROWQ>
-	    (<TRUNC_SHIFT>:VQN
+	    (SHIFTRT:VQN
 	      (match_operand:VQN 2 "register_operand" "w")
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN
+   && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
   "<shrn_op>shrn2\t%<vn2>0.<V2ntype>, %<v>2.<Vtype>, %3"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )

-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_be"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
 	  (ALL_TRUNC:<VNARROWQ>
-	    (<TRUNC_SHIFT>:VQN
+	    (SHIFTRT:VQN
 	      (match_operand:VQN 2 "register_operand" "w")
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "TARGET_SIMD && BYTES_BIG_ENDIAN
+   && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
   "<shrn_op>shrn2\t%<vn2>0.<V2ntype>, %<v>2.<Vtype>, %3"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )

-(define_expand "aarch64_<shrn_op>shrn2_n<mode>"
+(define_expand "aarch64_<shrn_op><sra_op>shrn2_n<mode>"
   [(match_operand:<VNARROWQ2> 0 "register_operand")
    (match_operand:<VNARROWQ> 1 "register_operand")
    (ALL_TRUNC:<VNARROWQ>
-     (match_operand:VQN 2 "register_operand"))
+     (SHIFTRT:VQN (match_operand:VQN 2 "register_operand")))
    (match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
-  "TARGET_SIMD"
+  "TARGET_SIMD && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
 {
   operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
 						   INTVAL (operands[3]));
   if (BYTES_BIG_ENDIAN)
-    emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_be (operands[0],
-		operands[1], operands[2], operands[3]));
+    emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be (
+		operands[0], operands[1], operands[2], operands[3]));
   else
-    emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_le (operands[0],
-		operands[1], operands[2], operands[3]));
+    emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le (
+		operands[0], operands[1], operands[2], operands[3]));
   DONE;
 }
 )
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d350d9e..0ace1ee 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -5469,42 +5469,42 @@
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv8hi (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv8hi (__a, __b, __c);
 }

 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv4si (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv4si (__a, __b, __c);
 }

 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv2di (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv2di (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv8hi_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv8hi_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv4si_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv4si_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv2di_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv2di_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
@@ -5630,42 +5630,42 @@
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv8hi (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv8hi (__a, __b, __c);
 }

 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv4si (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv4si (__a, __b, __c);
 }

 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv2di (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv2di (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv8hi_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv8hi_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv4si_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv4si_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv2di_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv2di_uuus (__a, __b, __c);
 }

 __extension__ extern __inline poly8x8_t