author     Jonathan Wright <jonathan.wright@arm.com>   2021-02-12 15:37:05 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>   2021-04-28 21:13:07 +0100
commit     4362c9c88d9092a6585cd061e5535cb2f0453d13 (patch)
tree       189ff14e470d24fbca113bfed733a8c4f5900952
parent     ffb112289452f58fbf00a4e57c0d7de930aca6b1 (diff)
aarch64: Use RTL builtins for v[q]tbx intrinsics
Rewrite v[q]tbx Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and
optimization.
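
For reference, the TBX form of the table lookup leaves a destination lane unchanged when its index is out of range, whereas TBL zeroes it. Below is a minimal, hypothetical usage sketch (the function name is illustrative and not part of this patch); with this change, a call like this should expand through the new RTL pattern instead of inline assembly:

  #include <arm_neon.h>

  /* Illustrative only: each lane i of the result is tab[idx[i]] when
     idx[i] < 16; otherwise the lane keeps its value from fallback
     (TBX semantics).  */
  uint8x16_t
  lookup_with_fallback (uint8x16_t fallback, uint8x16_t tab, uint8x16_t idx)
  {
    return vqtbx1q_u8 (fallback, tab, idx);
  }

Compiling such a call at -O2 for AArch64 should now emit a single tbx instruction, with the scheduling and register-allocation freedom the inline-asm version did not allow.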
gcc/ChangeLog:
2021-02-12 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add tbx1 builtin
generator macros.
* config/aarch64/aarch64-simd.md (aarch64_tbx1<mode>):
Define.
* config/aarch64/arm_neon.h (vqtbx1_s8): Use RTL builtin
instead of inline asm.
(vqtbx1_u8): Likewise.
(vqtbx1_p8): Likewise.
(vqtbx1q_s8): Likewise.
(vqtbx1q_u8): Likewise.
(vqtbx1q_p8): Likewise.
(vtbx2_s8): Likewise.
(vtbx2_u8): Likewise.
(vtbx2_p8): Likewise.
-rw-r--r--   gcc/config/aarch64/aarch64-simd-builtins.def |  4
-rw-r--r--   gcc/config/aarch64/aarch64-simd.md           | 11
-rw-r--r--   gcc/config/aarch64/arm_neon.h                | 69
3 files changed, 30 insertions, 54 deletions
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 04b392b..a7d4f2b 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -708,6 +708,10 @@
   VAR1 (BINOP, qtbl4, 0, NONE, v8qi)
   VAR1 (BINOP, qtbl4, 0, NONE, v16qi)
 
+  /* Implemented by aarch64_tbx1<mode>.  */
+  VAR2 (TERNOP, tbx1, 0, NONE, v8qi, v16qi)
+  VAR2 (TERNOPU, tbx1, 0, NONE, v8qi, v16qi)
+
   /* Implemented by aarch64_tbx4<mode>.  */
   VAR1 (TERNOP, tbx4, 0, NONE, v8qi)
   VAR1 (TERNOP, tbx4, 0, NONE, v16qi)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 565ce5a..299d911 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6852,6 +6852,17 @@
   [(set_attr "type" "neon_tbl1<q>")]
 )
 
+(define_insn "aarch64_tbx1<mode>"
+  [(set (match_operand:VB 0 "register_operand" "=w")
+	(unspec:VB [(match_operand:VB 1 "register_operand" "0")
+		    (match_operand:V16QI 2 "register_operand" "w")
+		    (match_operand:VB 3 "register_operand" "w")]
+		   UNSPEC_TBX))]
+  "TARGET_SIMD"
+  "tbx\\t%0.<Vtype>, {%2.16b}, %3.<Vtype>"
+  [(set_attr "type" "neon_tbl1<q>")]
+)
+
 ;; Two source registers.
 
 (define_insn "aarch64_tbl2v16qi"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 0817129..ead2bd0 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9625,72 +9625,46 @@ __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
 {
-  int8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi (__r, __tab, (int8x8_t) __idx);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
 {
-  uint8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __tab, __idx);
 }
 
 __extension__ extern __inline poly8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
 {
-  poly8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
+						 (int8x16_t) __tab,
+						 (int8x8_t) __idx);
 }
 
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
 {
-  int8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v16qi (__r, __tab, (int8x16_t) __idx);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
 {
-  uint8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v16qi_uuuu (__r, __tab, __idx);
 }
 
 __extension__ extern __inline poly8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
 {
-  poly8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x16_t) __builtin_aarch64_tbx1v16qi ((int8x16_t) __r,
+						   (int8x16_t) __tab,
+						   (int8x16_t) __idx);
 }
 
 /* V7 legacy table intrinsics.  */
@@ -9854,39 +9828,26 @@ __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
 {
-  int8x8_t __result = __r;
   int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi (__r, __temp, __idx);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
 {
-  uint8x8_t __result = __r;
   uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __temp, __idx);
 }
 
 __extension__ extern __inline poly8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
 {
-  poly8x8_t __result = __r;
   poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
+						 (int8x16_t) __temp,
+						 (int8x8_t) __idx);
 }
 
 /* End of temporary inline asm.  */