author    Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2021-01-12 10:07:19 +0000
committer Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2021-01-14 08:36:19 +0000
commit    c7f1ff01a2342ffd2873528018e5c3650b791d7e (patch)
tree      26df14b4c978d2685b68214e74c7670a2ed3d3c4
parent    be0851b8e934dfe95881f97dcf98518f92e7508c (diff)
aarch64: Reimplement vmovn_high_* intrinsics using builtins
The vmovn_high* intrinsics are supposed to map to XTN2 instructions that
narrow their source vector and insert it into the top half of the
destination vector.  This patch reimplements them away from inline assembly
to an RTL builtin that performs a vec_concat with a truncate.

gcc/
	* config/aarch64/aarch64-simd.md (aarch64_xtn2<mode>_le): Define.
	(aarch64_xtn2<mode>_be): Likewise.
	(aarch64_xtn2<mode>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def (xtn2): Define builtins.
	* config/aarch64/arm_neon.h (vmovn_high_s16): Reimplement using builtins.
	(vmovn_high_s32): Likewise.
	(vmovn_high_s64): Likewise.
	(vmovn_high_u16): Likewise.
	(vmovn_high_u32): Likewise.
	(vmovn_high_u64): Likewise.

gcc/testsuite/
	* gcc.target/aarch64/narrow_high-intrinsics.c: Adjust
	scan-assembler-times for xtn2.
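For context only (not part of the patch): a minimal C sketch of what the
reimplemented intrinsics compute, expressed with the pre-existing
vmovn/vcombine Advanced SIMD intrinsics.  The function and parameter names
below are illustrative, not from the source tree.

    #include <arm_neon.h>

    /* Illustrative reference: vmovn_high_s16 (lo, wide) narrows each 16-bit
       lane of 'wide' to 8 bits and places the result in the upper half of
       the 128-bit result, keeping 'lo' as the lower half.  */
    int8x16_t
    movn_high_s16_reference (int8x8_t lo, int16x8_t wide)
    {
      return vcombine_s8 (lo, vmovn_s16 (wide));
    }

With this patch, calling vmovn_high_s16 itself should now expand through the
new builtin and emit a single xtn2 instruction (e.g. "xtn2 v0.16b, v1.8h")
instead of the previous inline-asm sequence.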
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def                |  3
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md                          | 36
-rw-r--r--  gcc/config/aarch64/arm_neon.h                               | 45
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c  |  2
4 files changed, 49 insertions(+), 37 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 0d611878..6efc7706 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -315,6 +315,9 @@
BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0, ALL)
BUILTIN_VSDQ_I (USHIFTIMM, uqshl_n, 0, ALL)
+ /* Implemented by aarch64_xtn2<mode>. */
+ BUILTIN_VQN (UNOP, xtn2, 0, NONE)
+
/* Implemented by aarch64_reduc_plus_<mode>. */
BUILTIN_VALL (UNOP, reduc_plus_scal_, 10, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b6629bf..6520968 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -7271,6 +7271,42 @@
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
+(define_insn "aarch64_xtn2<mode>_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_xtn2<mode>_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_xtn2<mode>"
+ [(match_operand:<VNARROWQ2> 0 "register_operand")
+ (match_operand:<VNARROWQ> 1 "register_operand")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))]
+ "TARGET_SIMD"
+ {
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+)
+
(define_insn "aarch64_bfdot<mode>"
[(set (match_operand:VDQSF 0 "register_operand" "=w")
(plus:VDQSF
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d3a81da..b2a6b58 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8751,72 +8751,45 @@ __extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s16 (int8x8_t __a, int16x8_t __b)
{
- int8x16_t __result = vcombine_s8 (__a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.16b,%1.8h"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_xtn2v8hi (__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s32 (int16x4_t __a, int32x4_t __b)
{
- int16x8_t __result = vcombine_s16 (__a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.8h,%1.4s"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_xtn2v4si (__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s64 (int32x2_t __a, int64x2_t __b)
{
- int32x4_t __result = vcombine_s32 (__a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.4s,%1.2d"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_xtn2v2di (__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
{
- uint8x16_t __result = vcombine_u8 (__a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.16b,%1.8h"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (uint8x16_t)
+ __builtin_aarch64_xtn2v8hi ((int8x8_t) __a, (int16x8_t) __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
{
- uint16x8_t __result = vcombine_u16 (__a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.8h,%1.4s"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (uint16x8_t)
+ __builtin_aarch64_xtn2v4si ((int16x4_t) __a, (int32x4_t) __b);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
{
- uint32x4_t __result = vcombine_u32 (__a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
- __asm__ ("xtn2 %0.4s,%1.2d"
- : "+w"(__result)
- : "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (uint32x4_t)
+ __builtin_aarch64_xtn2v2di ((int32x2_t) __a, (int64x2_t) __b);
}
__extension__ extern __inline int8x8_t
diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
index 07d7803..a2e0cb9 100644
--- a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
@@ -122,4 +122,4 @@ ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
/* { dg-final { scan-assembler-times "uqxtn2 v" 3} } */
/* { dg-final { scan-assembler-times "sqxtn2 v" 3} } */
/* { dg-final { scan-assembler-times "sqxtun2 v" 3} } */
-/* { dg-final { scan-assembler-times "\\txtn2 v" 6} } */
+/* { dg-final { scan-assembler-times "\\txtn2\\tv" 6} } */