author     Jiong Wang <jiong.wang@arm.com>    2016-05-17 16:39:39 +0000
committer  Jiong Wang <jiwang@gcc.gnu.org>    2016-05-17 16:39:39 +0000
commit     daab45621346c95196ce84fa3b9b9301bfe7e5a9 (patch)
tree       cdb3cb878c699d53fb3399281f591dda61194fcb /gcc/config
parent     22330033389ee5b16f5923ace84ad551bd778adf (diff)
[AArch64, 3/4] Reimplement multiply by element to get rid of inline assembly
gcc/
	* config/aarch64/arm_neon.h (vmul_n_f32): Remove inline assembly.
	Use builtin.
	(vmul_n_s16): Likewise.
	(vmul_n_s32): Likewise.
	(vmul_n_u16): Likewise.
	(vmul_n_u32): Likewise.
	(vmulq_n_f32): Likewise.
	(vmulq_n_f64): Likewise.
	(vmulq_n_s16): Likewise.
	(vmulq_n_s32): Likewise.
	(vmulq_n_u16): Likewise.
	(vmulq_n_u32): Likewise.

gcc/testsuite/
	* gcc.target/aarch64/simd/vmul_elem_1.c: Use intrinsics.

From-SVN: r236333
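The rewritten intrinsics replace hard-coded "fmul"/"mul" by-element instructions with plain GNU C vector-by-scalar multiplies, so the compiler is free to schedule the operation and allocate registers itself. A minimal usage sketch (not part of the patch; assumes an AArch64 toolchain such as aarch64-linux-gnu-gcc at -O2):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  float32x2_t a = {1.0f, 2.0f};
  /* After this patch vmul_n_f32 is a plain vector-by-scalar multiply
     (__a * __b), so the compiler can pick the by-element form
     "fmul v0.2s, v1.2s, v2.s[0]" on its own instead of being forced
     to it by inline assembly.  */
  float32x2_t r = vmul_n_f32 (a, 3.0f);
  printf ("%f %f\n", (double) vget_lane_f32 (r, 0),
	  (double) vget_lane_f32 (r, 1));
  return 0;
}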
Diffstat (limited to 'gcc/config')
 gcc/config/aarch64/arm_neon.h | 189
 1 file changed, 68 insertions(+), 121 deletions(-)
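The testsuite entry in the ChangeLog rewrites vmul_elem_1.c in terms of the intrinsics. A hedged sketch of the shape such a test typically takes follows; the function name and scan-assembler pattern are illustrative, not the actual contents of vmul_elem_1.c:

/* { dg-do compile } */
/* { dg-options "-O2" } */

#include <arm_neon.h>

/* With the intrinsic defined as a plain multiply, the by-element
   instruction must now come out of the compiler itself.  */
float32x2_t
foo (float32x2_t a, float32_t b)
{
  return vmul_n_f32 (a, b);
}

/* { dg-final { scan-assembler "fmul\\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" } } */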
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ca7ace5..84931ae 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7938,61 +7938,6 @@ vmovn_u64 (uint64x2_t a)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmul_n_f32 (float32x2_t a, float32_t b)
-{
- float32x2_t result;
- __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmul_n_s16 (int16x4_t a, int16_t b)
-{
- int16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmul_n_s32 (int32x2_t a, int32_t b)
-{
- int32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmul_n_u16 (uint16x4_t a, uint16_t b)
-{
- uint16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmul_n_u32 (uint32x2_t a, uint32_t b)
-{
- uint32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
#define vmull_high_lane_s16(a, b, c) \
__extension__ \
({ \
@@ -8443,72 +8388,6 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulq_n_f32 (float32x4_t a, float32_t b)
-{
- float32x4_t result;
- __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulq_n_f64 (float64x2_t a, float64_t b)
-{
- float64x2_t result;
- __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmulq_n_s16 (int16x8_t a, int16_t b)
-{
- int16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmulq_n_s32 (int32x4_t a, int32_t b)
-{
- int32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmulq_n_u16 (uint16x8_t a, uint16_t b)
-{
- uint16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmulq_n_u32 (uint32x4_t a, uint32_t b)
-{
- uint32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vmvn_p8 (poly8x8_t a)
{
@@ -18924,6 +18803,74 @@ vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
return __a * __aarch64_vget_lane_any (__b, __lane);
}
+/* vmul_n. */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_n_f64 (float64x2_t __a, float64_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
/* vneg */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))