author    Tejas Belagod <tejas.belagod@arm.com>  2013-09-06 11:57:14 +0100
committer Tejas Belagod <belagod@gcc.gnu.org>    2013-09-06 11:57:14 +0100
commit    bb1ae543739b50a8559f52ce3760af6bb2e090e2 (patch)
tree      88dbb0dfd350f1d15db2c38eaf8bccfb3df745ff
parent    f23c07427173f7d0f1d89d2b7dc247adb98ea523 (diff)
arm_neon.h: Fix all vdup<bhsd>_lane<q> intrinsics to have the correct lane parameter.
2013-09-06  Tejas Belagod  <tejas.belagod@arm.com>

gcc/
	* config/aarch64/arm_neon.h: Fix all vdup<bhsd>_lane<q> intrinsics
	to have the correct lane parameter.

From-SVN: r202321
-rw-r--r--  gcc/ChangeLog                   5
-rw-r--r--  gcc/config/aarch64/arm_neon.h  82
2 files changed, 46 insertions, 41 deletions
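
Before this patch, each affected scalar vdup intrinsic discarded its lane
argument and always extracted lane 0. A minimal sketch of the symptom (not
part of the commit; assumes an AArch64 GCC target with the unpatched header):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  int8x8_t v = { 10, 11, 12, 13, 14, 15, 16, 17 };

  /* Requests lane 3, so the result should be 13.  With the unpatched
     header the intrinsic ignored the lane index and returned lane 0,
     i.e. 10.  */
  int8_t x = vdupb_lane_s8 (v, 3);
  printf ("%d\n", x);
  return 0;
}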
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c603c82..f9fb662 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2013-09-06 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h: Fix all vdup<bhsd>_lane<q> intrinsics to
+ have the correct lane parameter.
+
2013-09-06 Richard Biener <rguenther@suse.de>
* cfganal.c (control_dependences::~control_dependences):
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 29d1378..93ed6d3 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -19750,59 +19750,59 @@ vdupq_laneq_u64 (uint64x2_t __a, const int __b)
/* vdupb_lane */
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
-vdupb_lane_p8 (poly8x8_t __a, const int __attribute__ ((unused)) __b)
+vdupb_lane_p8 (poly8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_p8 (__a, 0);
+ return __aarch64_vget_lane_p8 (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vdupb_lane_s8 (int8x8_t __a, const int __attribute__ ((unused)) __b)
+vdupb_lane_s8 (int8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_s8 (__a, 0);
+ return __aarch64_vget_lane_s8 (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vdupb_lane_u8 (uint8x8_t __a, const int __attribute__ ((unused)) __b)
+vdupb_lane_u8 (uint8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_u8 (__a, 0);
+ return __aarch64_vget_lane_u8 (__a, __b);
}
/* vduph_lane */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
-vduph_lane_p16 (poly16x4_t __a, const int __attribute__ ((unused)) __b)
+vduph_lane_p16 (poly16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_p16 (__a, 0);
+ return __aarch64_vget_lane_p16 (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vduph_lane_s16 (int16x4_t __a, const int __attribute__ ((unused)) __b)
+vduph_lane_s16 (int16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_s16 (__a, 0);
+ return __aarch64_vget_lane_s16 (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vduph_lane_u16 (uint16x4_t __a, const int __attribute__ ((unused)) __b)
+vduph_lane_u16 (uint16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_u16 (__a, 0);
+ return __aarch64_vget_lane_u16 (__a, __b);
}
/* vdups_lane */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vdups_lane_f32 (float32x2_t __a, const int __attribute__ ((unused)) __b)
+vdups_lane_f32 (float32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_f32 (__a, 0);
+ return __aarch64_vget_lane_f32 (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vdups_lane_s32 (int32x2_t __a, const int __attribute__ ((unused)) __b)
+vdups_lane_s32 (int32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_s32 (__a, 0);
+ return __aarch64_vget_lane_s32 (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vdups_lane_u32 (uint32x2_t __a, const int __attribute__ ((unused)) __b)
+vdups_lane_u32 (uint32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_u32 (__a, 0);
+ return __aarch64_vget_lane_u32 (__a, __b);
}
/* vdupd_lane */
@@ -19826,78 +19826,78 @@ vdupd_lane_u64 (uint64x1_t __a, const int __attribute__ ((unused)) __b)
/* vdupb_laneq */
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
-vdupb_laneq_p8 (poly8x16_t __a, const int __attribute__ ((unused)) __b)
+vdupb_laneq_p8 (poly8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p8 (__a, 0);
+ return __aarch64_vgetq_lane_p8 (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vdupb_laneq_s8 (int8x16_t __a, const int __attribute__ ((unused)) __b)
{
- return __aarch64_vgetq_lane_s8 (__a, 0);
+ return __aarch64_vgetq_lane_s8 (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vdupb_laneq_u8 (uint8x16_t __a, const int __attribute__ ((unused)) __b)
+vdupb_laneq_u8 (uint8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u8 (__a, 0);
+ return __aarch64_vgetq_lane_u8 (__a, __b);
}
/* vduph_laneq */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
-vduph_laneq_p16 (poly16x8_t __a, const int __attribute__ ((unused)) __b)
+vduph_laneq_p16 (poly16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p16 (__a, 0);
+ return __aarch64_vgetq_lane_p16 (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vduph_laneq_s16 (int16x8_t __a, const int __attribute__ ((unused)) __b)
+vduph_laneq_s16 (int16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s16 (__a, 0);
+ return __aarch64_vgetq_lane_s16 (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vduph_laneq_u16 (uint16x8_t __a, const int __attribute__ ((unused)) __b)
+vduph_laneq_u16 (uint16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u16 (__a, 0);
+ return __aarch64_vgetq_lane_u16 (__a, __b);
}
/* vdups_laneq */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vdups_laneq_f32 (float32x4_t __a, const int __attribute__ ((unused)) __b)
+vdups_laneq_f32 (float32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f32 (__a, 0);
+ return __aarch64_vgetq_lane_f32 (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vdups_laneq_s32 (int32x4_t __a, const int __attribute__ ((unused)) __b)
+vdups_laneq_s32 (int32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s32 (__a, 0);
+ return __aarch64_vgetq_lane_s32 (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vdups_laneq_u32 (uint32x4_t __a, const int __attribute__ ((unused)) __b)
+vdups_laneq_u32 (uint32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u32 (__a, 0);
+ return __aarch64_vgetq_lane_u32 (__a, __b);
}
/* vdupd_laneq */
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vdupd_laneq_f64 (float64x2_t __a, const int __attribute__ ((unused)) __b)
+vdupd_laneq_f64 (float64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f64 (__a, 0);
+ return __aarch64_vgetq_lane_f64 (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vdupd_laneq_s64 (int64x2_t __a, const int __attribute__ ((unused)) __b)
+vdupd_laneq_s64 (int64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s64 (__a, 0);
+ return __aarch64_vgetq_lane_s64 (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vdupd_laneq_u64 (uint64x2_t __a, const int __attribute__ ((unused)) __b)
+vdupd_laneq_u64 (uint64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u64 (__a, 0);
+ return __aarch64_vgetq_lane_u64 (__a, __b);
}
/* vld1 */
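
With the lane parameter passed through, each index now selects the matching
element. A quick check of that behaviour (a sketch, assuming an AArch64 GCC
with the patched header; vduph_lane_u16 is one of the intrinsics fixed above):

#include <arm_neon.h>
#include <assert.h>

int
main (void)
{
  uint16x4_t v = { 100, 200, 300, 400 };

  /* The lane argument must be an integer constant expression, so each
     lane is checked with a separate call rather than in a loop.  */
  assert (vduph_lane_u16 (v, 0) == 100);
  assert (vduph_lane_u16 (v, 1) == 200);
  assert (vduph_lane_u16 (v, 2) == 300);
  assert (vduph_lane_u16 (v, 3) == 400);
  return 0;
}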