author     Jonathan Wright <jonathan.wright@arm.com>  2021-08-12 12:27:15 +0100
committer  Jonathan Wright <jonathan.wright@arm.com>  2021-08-17 11:44:07 +0100
commit     5ed35a9874ba8c3aa2bbbd720e46783db264b684 (patch)
tree       8568129962073bbe2e6832147eac9722e92dea2d /gcc
parent     92aadbd593c1aef6798e7a64b8f7a91fed32aa68 (diff)
aarch64: Remove macros for vld2[q]_lane Neon intrinsics
Remove macros for vld2[q]_lane Neon intrinsics. This is a preparatory
step before adding new modes for structures of Advanced SIMD vectors.

gcc/ChangeLog:

2021-08-12  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/arm_neon.h (__LD2_LANE_FUNC): Delete.
	(__LD2Q_LANE_FUNC): Likewise.
	(vld2_lane_u8): Define without macro.
	(vld2_lane_u16): Likewise.
	(vld2_lane_u32): Likewise.
	(vld2_lane_u64): Likewise.
	(vld2_lane_s8): Likewise.
	(vld2_lane_s16): Likewise.
	(vld2_lane_s32): Likewise.
	(vld2_lane_s64): Likewise.
	(vld2_lane_f16): Likewise.
	(vld2_lane_f32): Likewise.
	(vld2_lane_f64): Likewise.
	(vld2_lane_p8): Likewise.
	(vld2_lane_p16): Likewise.
	(vld2_lane_p64): Likewise.
	(vld2q_lane_u8): Likewise.
	(vld2q_lane_u16): Likewise.
	(vld2q_lane_u32): Likewise.
	(vld2q_lane_u64): Likewise.
	(vld2q_lane_s8): Likewise.
	(vld2q_lane_s16): Likewise.
	(vld2q_lane_s32): Likewise.
	(vld2q_lane_s64): Likewise.
	(vld2q_lane_f16): Likewise.
	(vld2q_lane_f32): Likewise.
	(vld2q_lane_f64): Likewise.
	(vld2q_lane_p8): Likewise.
	(vld2q_lane_p16): Likewise.
	(vld2q_lane_p64): Likewise.
	(vld2_lane_bf16): Likewise.
	(vld2q_lane_bf16): Likewise.
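As a usage reference (not part of the patch itself): each vld2_lane
intrinsic loads one interleaved pair of elements from memory into lane
__c of the two vectors in __b. A minimal caller sketch, with an
illustrative function name, assuming an AArch64 target:

    #include <arm_neon.h>

    /* Replace lane 3 of both vectors in `acc` with the two consecutive
       bytes at `buf`.  The lane index must be a constant in [0, 7].  */
    uint8x8x2_t
    load_pair_into_lane3 (const uint8_t *buf, uint8x8x2_t acc)
    {
      return vld2_lane_u8 (buf, acc, 3);
    }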
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/aarch64/arm_neon.h | 558
1 file changed, 474 insertions(+), 84 deletions(-)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 390cf9a..91c072f 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -19882,92 +19882,455 @@ vld4q_dup_p64 (const poly64_t * __a)
/* vld2_lane */
-#define __LD2_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
- qmode, ptrmode, funcsuffix, signedtype) \
-__extension__ extern __inline intype \
-__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
-vld2_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
-{ \
- __builtin_aarch64_simd_oi __o; \
- largetype __temp; \
- __temp.val[0] = \
- vcombine_##funcsuffix (__b.val[0], vcreate_##funcsuffix (0)); \
- __temp.val[1] = \
- vcombine_##funcsuffix (__b.val[1], vcreate_##funcsuffix (0)); \
- __o = __builtin_aarch64_set_qregoi##qmode (__o, \
- (signedtype) __temp.val[0], \
- 0); \
- __o = __builtin_aarch64_set_qregoi##qmode (__o, \
- (signedtype) __temp.val[1], \
- 1); \
- __o = __builtin_aarch64_ld2_lane##mode ( \
- (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
- __b.val[0] = (vectype) __builtin_aarch64_get_dregoidi (__o, 0); \
- __b.val[1] = (vectype) __builtin_aarch64_get_dregoidi (__o, 1); \
- return __b; \
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_u8 (const uint8_t * __ptr, uint8x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint8x16x2_t __temp;
+ __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (0));
+ __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (0));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ __b.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
}
-__LD2_LANE_FUNC (float16x4x2_t, float16x4_t, float16x8x2_t, float16_t, v4hf,
- v8hf, hf, f16, float16x8_t)
-__LD2_LANE_FUNC (float32x2x2_t, float32x2_t, float32x4x2_t, float32_t, v2sf, v4sf,
- sf, f32, float32x4_t)
-__LD2_LANE_FUNC (float64x1x2_t, float64x1_t, float64x2x2_t, float64_t, df, v2df,
- df, f64, float64x2_t)
-__LD2_LANE_FUNC (poly8x8x2_t, poly8x8_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi, p8,
- int8x16_t)
-__LD2_LANE_FUNC (poly16x4x2_t, poly16x4_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi,
- p16, int16x8_t)
-__LD2_LANE_FUNC (poly64x1x2_t, poly64x1_t, poly64x2x2_t, poly64_t, di,
- v2di_ssps, di, p64, poly64x2_t)
-__LD2_LANE_FUNC (int8x8x2_t, int8x8_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
- int8x16_t)
-__LD2_LANE_FUNC (int16x4x2_t, int16x4_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
- int16x8_t)
-__LD2_LANE_FUNC (int32x2x2_t, int32x2_t, int32x4x2_t, int32_t, v2si, v4si, si, s32,
- int32x4_t)
-__LD2_LANE_FUNC (int64x1x2_t, int64x1_t, int64x2x2_t, int64_t, di, v2di, di, s64,
- int64x2_t)
-__LD2_LANE_FUNC (uint8x8x2_t, uint8x8_t, uint8x16x2_t, uint8_t, v8qi, v16qi, qi, u8,
- int8x16_t)
-__LD2_LANE_FUNC (uint16x4x2_t, uint16x4_t, uint16x8x2_t, uint16_t, v4hi, v8hi, hi,
- u16, int16x8_t)
-__LD2_LANE_FUNC (uint32x2x2_t, uint32x2_t, uint32x4x2_t, uint32_t, v2si, v4si, si,
- u32, int32x4_t)
-__LD2_LANE_FUNC (uint64x1x2_t, uint64x1_t, uint64x2x2_t, uint64_t, di, v2di, di,
- u64, int64x2_t)
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_u16 (const uint16_t * __ptr, uint16x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint16x8x2_t __temp;
+ __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (0));
+ __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (0));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ __b.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_u32 (const uint32_t * __ptr, uint32x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint32x4x2_t __temp;
+ __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (0));
+ __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (0));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2si (
+ (__builtin_aarch64_simd_si *) __ptr, __o, __c);
+ __b.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline uint64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_u64 (const uint64_t * __ptr, uint64x1x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint64x2x2_t __temp;
+ __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (0));
+ __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (0));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanedi (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ __b.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_s8 (const int8_t * __ptr, int8x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int8x16x2_t __temp;
+ __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (0));
+ __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (0));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ __b.val[0] = (int8x8_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (int8x8_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_s16 (const int16_t * __ptr, int16x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int16x8x2_t __temp;
+ __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (0));
+ __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (0));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ __b.val[0] = (int16x4_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (int16x4_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_s32 (const int32_t * __ptr, int32x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int32x4x2_t __temp;
+ __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (0));
+ __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (0));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2si (
+ (__builtin_aarch64_simd_si *) __ptr, __o, __c);
+ __b.val[0] = (int32x2_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (int32x2_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline int64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_s64 (const int64_t * __ptr, int64x1x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int64x2x2_t __temp;
+ __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (0));
+ __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (0));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanedi (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ __b.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_f16 (const float16_t * __ptr, float16x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float16x8x2_t __temp;
+ __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (0));
+ __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (0));
+ __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4hf (
+ (__builtin_aarch64_simd_hf *) __ptr, __o, __c);
+ __b.val[0] = (float16x4_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (float16x4_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_f32 (const float32_t * __ptr, float32x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float32x4x2_t __temp;
+ __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (0));
+ __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (0));
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2sf (
+ (__builtin_aarch64_simd_sf *) __ptr, __o, __c);
+ __b.val[0] = (float32x2_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (float32x2_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline float64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_f64 (const float64_t * __ptr, float64x1x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float64x2x2_t __temp;
+ __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (0));
+ __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (0));
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanedf (
+ (__builtin_aarch64_simd_df *) __ptr, __o, __c);
+ __b.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_p8 (const poly8_t * __ptr, poly8x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly8x16x2_t __temp;
+ __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (0));
+ __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (0));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ __b.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_p16 (const poly16_t * __ptr, poly16x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly16x8x2_t __temp;
+ __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (0));
+ __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (0));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ __b.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_p64 (const poly64_t * __ptr, poly64x1x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly64x2x2_t __temp;
+ __temp.val[0] = vcombine_p64 (__b.val[0], vcreate_p64 (0));
+ __temp.val[1] = vcombine_p64 (__b.val[1], vcreate_p64 (0));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanedi (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ __b.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
/* vld2q_lane */
-#define __LD2Q_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
-__extension__ extern __inline intype \
-__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
-vld2q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
-{ \
- __builtin_aarch64_simd_oi __o; \
- intype ret; \
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); \
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); \
- __o = __builtin_aarch64_ld2_lane##mode ( \
- (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
- ret.val[0] = (vtype) __builtin_aarch64_get_qregoiv4si (__o, 0); \
- ret.val[1] = (vtype) __builtin_aarch64_get_qregoiv4si (__o, 1); \
- return ret; \
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_u8 (const uint8_t * __ptr, uint8x16x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint8x16x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev16qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
}
-__LD2Q_LANE_FUNC (float16x8x2_t, float16x8_t, float16_t, v8hf, hf, f16)
-__LD2Q_LANE_FUNC (float32x4x2_t, float32x4_t, float32_t, v4sf, sf, f32)
-__LD2Q_LANE_FUNC (float64x2x2_t, float64x2_t, float64_t, v2df, df, f64)
-__LD2Q_LANE_FUNC (poly8x16x2_t, poly8x16_t, poly8_t, v16qi, qi, p8)
-__LD2Q_LANE_FUNC (poly16x8x2_t, poly16x8_t, poly16_t, v8hi, hi, p16)
-__LD2Q_LANE_FUNC (poly64x2x2_t, poly64x2_t, poly64_t, v2di, di, p64)
-__LD2Q_LANE_FUNC (int8x16x2_t, int8x16_t, int8_t, v16qi, qi, s8)
-__LD2Q_LANE_FUNC (int16x8x2_t, int16x8_t, int16_t, v8hi, hi, s16)
-__LD2Q_LANE_FUNC (int32x4x2_t, int32x4_t, int32_t, v4si, si, s32)
-__LD2Q_LANE_FUNC (int64x2x2_t, int64x2_t, int64_t, v2di, di, s64)
-__LD2Q_LANE_FUNC (uint8x16x2_t, uint8x16_t, uint8_t, v16qi, qi, u8)
-__LD2Q_LANE_FUNC (uint16x8x2_t, uint16x8_t, uint16_t, v8hi, hi, u16)
-__LD2Q_LANE_FUNC (uint32x4x2_t, uint32x4_t, uint32_t, v4si, si, u32)
-__LD2Q_LANE_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, v2di, di, u64)
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_u16 (const uint16_t * __ptr, uint16x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint16x8x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_u32 (const uint32_t * __ptr, uint32x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint32x4x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4si (
+ (__builtin_aarch64_simd_si *) __ptr, __o, __c);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline uint64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_u64 (const uint64_t * __ptr, uint64x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint64x2x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2di (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_s8 (const int8_t * __ptr, int8x16x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int8x16x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev16qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_s16 (const int16_t * __ptr, int16x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int16x8x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_s32 (const int32_t * __ptr, int32x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int32x4x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4si (
+ (__builtin_aarch64_simd_si *) __ptr, __o, __c);
+ ret.val[0] = __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_s64 (const int64_t * __ptr, int64x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ int64x2x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2di (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_f16 (const float16_t * __ptr, float16x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float16x8x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8hf (
+ (__builtin_aarch64_simd_hf *) __ptr, __o, __c);
+ ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_f32 (const float32_t * __ptr, float32x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float32x4x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4sf (
+ (__builtin_aarch64_simd_sf *) __ptr, __o, __c);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline float64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_f64 (const float64_t * __ptr, float64x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ float64x2x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2df (
+ (__builtin_aarch64_simd_df *) __ptr, __o, __c);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_p8 (const poly8_t * __ptr, poly8x16x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly8x16x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev16qi (
+ (__builtin_aarch64_simd_qi *) __ptr, __o, __c);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_p16 (const poly16_t * __ptr, poly16x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly16x8x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8hi (
+ (__builtin_aarch64_simd_hi *) __ptr, __o, __c);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_p64 (const poly64_t * __ptr, poly64x2x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly64x2x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev2di (
+ (__builtin_aarch64_simd_di *) __ptr, __o, __c);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
/* vld3_lane */
@@ -34584,9 +34947,38 @@ vcopyq_laneq_bf16 (bfloat16x8_t __a, const int __lane1,
__a, __lane1);
}
-__LD2_LANE_FUNC (bfloat16x4x2_t, bfloat16x4_t, bfloat16x8x2_t, bfloat16_t, v4bf,
- v8bf, bf, bf16, bfloat16x8_t)
-__LD2Q_LANE_FUNC (bfloat16x8x2_t, bfloat16x8_t, bfloat16_t, v8bf, bf, bf16)
+__extension__ extern __inline bfloat16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2_lane_bf16 (const bfloat16_t * __ptr, bfloat16x4x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ bfloat16x8x2_t __temp;
+ __temp.val[0] = vcombine_bf16 (__b.val[0], vcreate_bf16 (0));
+ __temp.val[1] = vcombine_bf16 (__b.val[1], vcreate_bf16 (0));
+ __o = __builtin_aarch64_set_qregoiv8bf (__o, (bfloat16x8_t) __temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8bf (__o, (bfloat16x8_t) __temp.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev4bf (
+ (__builtin_aarch64_simd_bf *) __ptr, __o, __c);
+ __b.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ __b.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return __b;
+}
+
+__extension__ extern __inline bfloat16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__))
+vld2q_lane_bf16 (const bfloat16_t * __ptr, bfloat16x8x2_t __b, const int __c)
+{
+ __builtin_aarch64_simd_oi __o;
+ bfloat16x8x2_t ret;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1);
+ __o = __builtin_aarch64_ld2_lanev8bf (
+ (__builtin_aarch64_simd_bf *) __ptr, __o, __c);
+ ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
__LD3_LANE_FUNC (bfloat16x4x3_t, bfloat16x4_t, bfloat16x8x3_t, bfloat16_t, v4bf,
v8bf, bf, bf16, bfloat16x8_t)
__LD3Q_LANE_FUNC (bfloat16x8x3_t, bfloat16x8_t, bfloat16_t, v8bf, bf, bf16)
@@ -34888,8 +35280,6 @@ vaddq_p128 (poly128_t __a, poly128_t __b)
#undef __aarch64_vdupq_laneq_u32
#undef __aarch64_vdupq_laneq_u64
-#undef __LD2_LANE_FUNC
-#undef __LD2Q_LANE_FUNC
#undef __LD3_LANE_FUNC
#undef __LD3Q_LANE_FUNC
#undef __LD4_LANE_FUNC