diff options
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog | 9
-rw-r--r-- | gcc/config/aarch64/aarch64-builtins.c | 1
-rw-r--r-- | gcc/config/aarch64/aarch64-simd-builtins.def | 1
-rw-r--r-- | gcc/config/aarch64/aarch64-simd.md | 10
-rw-r--r-- | gcc/config/aarch64/arm_neon.h | 6
-rw-r--r-- | gcc/config/aarch64/iterators.md | 3
-rw-r--r-- | gcc/testsuite/ChangeLog | 5
-rw-r--r-- | gcc/testsuite/gcc.target/aarch64/simd/vfma_f64.c | 8
-rw-r--r-- | gcc/testsuite/gcc.target/aarch64/simd/vfms_f64.c | 8
9 files changed, 33 insertions, 18 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index ed4299a..d661ec1 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,12 @@ +2014-11-17 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-builtins.c (TYPES_CREATE): Remove. + * config/aarch64/aarch64-simd-builtins.def (create): Remove. + * config/aarch64/aarch64-simd.md (aarch64_create<mode>): Remove. + * config/aarch64/arm_neon.h (vcreate_f64, vreinterpret_f64_s64, + vreinterpret_f64_u64): Replace __builtin_aarch64_createv1df with C casts. + * config/aarch64/iterators.md (VD1): Remove. + 2014-11-17 Kyrylo Tkachov <kyrylo.tkachov@arm.com> * config/aarch64/aarch64-cores.def (cortex-a53): Remove diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c index 87962f1..2637c71 100644 --- a/gcc/config/aarch64/aarch64-builtins.c +++ b/gcc/config/aarch64/aarch64-builtins.c @@ -138,7 +138,6 @@ static enum aarch64_type_qualifiers aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS] = { qualifier_unsigned, qualifier_unsigned }; #define TYPES_UNOPU (aarch64_types_unopu_qualifiers) -#define TYPES_CREATE (aarch64_types_unop_qualifiers) static enum aarch64_type_qualifiers aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS] = { qualifier_none, qualifier_none, qualifier_maybe_immediate }; diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def index 10bf67e..545c7da 100644 --- a/gcc/config/aarch64/aarch64-simd-builtins.def +++ b/gcc/config/aarch64/aarch64-simd-builtins.def @@ -39,7 +39,6 @@ 1-9 - CODE_FOR_<name><mode><1-9> 10 - CODE_FOR_<name><mode>. 
*/ - BUILTIN_VD1 (CREATE, create, 0) BUILTIN_VDC (COMBINE, combine, 0) BUILTIN_VB (BINOP, pmul, 0) BUILTIN_VDQF (UNOP, sqrt, 2) diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index 7f4d46e..148567b 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -2327,16 +2327,6 @@ ;; Patterns for AArch64 SIMD Intrinsics. -(define_expand "aarch64_create<mode>" - [(match_operand:VD1 0 "register_operand" "") - (match_operand:DI 1 "general_operand" "")] - "TARGET_SIMD" -{ - rtx src = gen_lowpart (<MODE>mode, operands[1]); - emit_move_insn (operands[0], src); - DONE; -}) - ;; Lane extraction with sign extension to general purpose register. (define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>" [(set (match_operand:GPI 0 "register_operand" "=r") diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index b3b80b8..6200f62 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -2638,7 +2638,7 @@ vcreate_u64 (uint64_t __a) __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vcreate_f64 (uint64_t __a) { - return __builtin_aarch64_createv1df (__a); + return (float64x1_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) @@ -3238,7 +3238,7 @@ vreinterpret_f64_s32 (int32x2_t __a) __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_s64 (int64x1_t __a) { - return __builtin_aarch64_createv1df ((uint64_t) vget_lane_s64 (__a, 0)); + return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) @@ -3262,7 +3262,7 @@ vreinterpret_f64_u32 (uint32x2_t __a) __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_u64 (uint64x1_t __a) { - return __builtin_aarch64_createv1df (vget_lane_u64 (__a, 0)); + return (float64x1_t) __a; } __extension__ static __inline float64x2_t 
__attribute__((__always_inline__)) diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index 9935167..7dd3917 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -147,9 +147,6 @@ ;; Double vector modes for combines. (define_mode_iterator VDIC [V8QI V4HI V2SI]) -;; Double vector modes inc V1DF -(define_mode_iterator VD1 [V8QI V4HI V2SI V2SF V1DF]) - ;; Vector modes except double int. (define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF]) diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 46ab25f..6c25013 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,8 @@ +2014-11-17 Alan Lawrence <alan.lawrence@arm.com> + + * gcc.target/aarch64/simd/vfma_f64.c: Add asm volatile memory. + * gcc.target/aarch64/simd/vfms_f64.c: Likewise. + 2014-11-17 Ilya Enkovich <ilya.enkovich@intel.com> * gcc.target/i386/chkp-strlen-1.c: New. diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vfma_f64.c b/gcc/testsuite/gcc.target/aarch64/simd/vfma_f64.c index 272b79c..8083d2c 100644 --- a/gcc/testsuite/gcc.target/aarch64/simd/vfma_f64.c +++ b/gcc/testsuite/gcc.target/aarch64/simd/vfma_f64.c @@ -7,6 +7,10 @@ #define EPS 1.0e-15 +#define INHIB_OPT(x) asm volatile ("mov %d0, %1.d[0]" \ + : "=w"(x) \ + : "w"(x) \ + : /* No clobbers. 
*/); extern void abort (void); @@ -24,6 +28,10 @@ main (void) arg2 = vcreate_f64 (0x3fa88480812d6670ULL); arg3 = vcreate_f64 (0x3fd5791ae2a92572ULL); + INHIB_OPT (arg1); + INHIB_OPT (arg2); + INHIB_OPT (arg3); + expected = 0.6280448184360076; actual = vget_lane_f64 (vfma_f64 (arg1, arg2, arg3), 0); diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vfms_f64.c b/gcc/testsuite/gcc.target/aarch64/simd/vfms_f64.c index f6e1f77..ede140d 100644 --- a/gcc/testsuite/gcc.target/aarch64/simd/vfms_f64.c +++ b/gcc/testsuite/gcc.target/aarch64/simd/vfms_f64.c @@ -7,6 +7,10 @@ #define EPS 1.0e-15 +#define INHIB_OPT(x) asm volatile ("mov %d0, %1.d[0]" \ + : "=w"(x) \ + : "w"(x) \ + : /* No clobbers. */); extern void abort (void); @@ -24,6 +28,10 @@ main (void) arg2 = vcreate_f64 (0x3fe6b78680fa29ceULL); arg3 = vcreate_f64 (0x3feea3cbf921fbe0ULL); + INHIB_OPT (arg1); + INHIB_OPT (arg2); + INHIB_OPT (arg3); + expected = 4.4964705746355915e-2; actual = vget_lane_f64 (vfms_f64 (arg1, arg2, arg3), 0); |