aboutsummaryrefslogtreecommitdiff
path: root/gcc/config/aarch64/arm_neon.h
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/aarch64/arm_neon.h')
-rw-r--r--  gcc/config/aarch64/arm_neon.h  32
1 file changed, 32 insertions, 0 deletions
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ab3a00c..fcdc977 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -17201,6 +17201,14 @@ vmax_f32 (float32x2_t __a, float32x2_t __b)
return __builtin_aarch64_smax_nanv2sf (__a, __b);
}
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmax_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_smax_nandf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmax_s8 (int8x8_t __a, int8x8_t __b)
{
@@ -17692,6 +17700,14 @@ vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
return __builtin_aarch64_fmaxv2sf (__a, __b);
}
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmaxnm_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_fmaxdf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
{
@@ -17824,6 +17840,14 @@ vmin_f32 (float32x2_t __a, float32x2_t __b)
return __builtin_aarch64_smin_nanv2sf (__a, __b);
}
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmin_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_smin_nandf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmin_s8 (int8x8_t __a, int8x8_t __b)
{
@@ -17922,6 +17946,14 @@ vminnm_f32 (float32x2_t __a, float32x2_t __b)
return __builtin_aarch64_fminv2sf (__a, __b);
}
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vminnm_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+  { __builtin_aarch64_fmindf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vminnmq_f32 (float32x4_t __a, float32x4_t __b)
{