author     Akram Ahmad <Akram.Ahmad@arm.com>          2025-01-17 17:43:49 +0000
committer  Tamar Christina <tamar.christina@arm.com>  2025-01-18 11:14:59 +0000
commit     aa361611490947eb228e5b625a3f0f23ff647dbd (patch)
tree       81ffc2295b1b68ac0e2e5b374dbc93e48ecde1ab /gcc/config/aarch64
parent     8f8ca83f2f6f165c4060ee1fc18ed3c74571ab7a (diff)
AArch64: Use standard names for saturating arithmetic
This renames the existing {s,u}q{add,sub} instructions to use the
standard names {s,u}s{add,sub}3, which are used by IFN_SAT_ADD and
IFN_SAT_SUB. The NEON intrinsics for saturating arithmetic and their
corresponding builtins are changed to use these standard names too.

Using the standard names for the instructions causes 32- and 64-bit
unsigned scalar saturating arithmetic to use the NEON instructions,
resulting in an additional (and inefficient) FMOV being generated when
the original operands are in GP registers. This patch therefore also
restores the original behaviour of using the adds/subs instructions in
this circumstance.

Additional tests are written for the scalar and Adv. SIMD cases to
ensure that the correct instructions are used. The NEON intrinsics are
already tested elsewhere.

gcc/ChangeLog:

	* config/aarch64/aarch64-builtins.cc: Expand iterators.
	* config/aarch64/aarch64-simd-builtins.def: Use standard names.
	* config/aarch64/aarch64-simd.md: Use standard names, split insn
	definitions on signedness of operator and type of operands.
	* config/aarch64/arm_neon.h: Use standard builtin names.
	* config/aarch64/iterators.md: Add VSDQ_I_QI_HI iterator to
	simplify splitting of insn for unsigned scalar arithmetic.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/scalar_intrinsics.c: Update testcases.
	* gcc.target/aarch64/advsimd-intrinsics/saturating_arithmetic_autovect.inc:
	Template file for unsigned vector saturating arithmetic tests.
	* gcc.target/aarch64/advsimd-intrinsics/saturating_arithmetic_autovect_1.c:
	8-bit vector type tests.
	* gcc.target/aarch64/advsimd-intrinsics/saturating_arithmetic_autovect_2.c:
	16-bit vector type tests.
	* gcc.target/aarch64/advsimd-intrinsics/saturating_arithmetic_autovect_3.c:
	32-bit vector type tests.
	* gcc.target/aarch64/advsimd-intrinsics/saturating_arithmetic_autovect_4.c:
	64-bit vector type tests.
	* gcc.target/aarch64/saturating_arithmetic.inc: Template file for
	scalar saturating arithmetic tests.
	* gcc.target/aarch64/saturating_arithmetic_1.c: 8-bit tests.
	* gcc.target/aarch64/saturating_arithmetic_2.c: 16-bit tests.
	* gcc.target/aarch64/saturating_arithmetic_3.c: 32-bit tests.
	* gcc.target/aarch64/saturating_arithmetic_4.c: 64-bit tests.

Co-authored-by: Tamar Christina <tamar.christina@arm.com>
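
For context, the kind of source the restored GP-register path targets is the plain C saturating-add idiom below. The function name and exact spelling are illustrative rather than taken from the patch or its testsuite, but this form is one of the unsigned saturation idioms the middle end recognises as IFN_SAT_ADD, which now expands through the renamed usadd<mode>3 pattern:

    #include <stdint.h>

    /* Branchless unsigned saturating add.  The (sum < a) overflow test is
       the part the middle end pattern-matches into IFN_SAT_ADD when the
       target provides usadd<mode>3.  */
    uint64_t
    sat_addu64 (uint64_t a, uint64_t b)
    {
      uint64_t sum = a + b;
      return sum | -(uint64_t) (sum < a);
    }

With this patch, when the operands live in GP registers the SImode/DImode cases are expected to stay on the adds/subs path (a flag-setting add followed by a conditional select) instead of taking a round trip through FMOV and the NEON uqadd/uqsub instructions.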
Diffstat (limited to 'gcc/config/aarch64')
-rw-r--r--  gcc/config/aarch64/aarch64-builtins.cc        12
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def   8
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md           207
-rw-r--r--  gcc/config/aarch64/arm_neon.h                 96
-rw-r--r--  gcc/config/aarch64/iterators.md                4
5 files changed, 271 insertions, 56 deletions
diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc
index 86eebc16..6d5479c 100644
--- a/gcc/config/aarch64/aarch64-builtins.cc
+++ b/gcc/config/aarch64/aarch64-builtins.cc
@@ -5039,6 +5039,18 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt,
new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
LSHIFT_EXPR, args[0], args[1]);
break;
+ /* lower saturating add/sub neon builtins to gimple. */
+ BUILTIN_VSDQ_I (BINOP, ssadd, 3, DEFAULT)
+ BUILTIN_VSDQ_I (BINOPU, usadd, 3, DEFAULT)
+ new_stmt = gimple_build_call_internal (IFN_SAT_ADD, 2, args[0], args[1]);
+ gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+ break;
+ BUILTIN_VSDQ_I (BINOP, sssub, 3, DEFAULT)
+ BUILTIN_VSDQ_I (BINOPU, ussub, 3, DEFAULT)
+ new_stmt = gimple_build_call_internal (IFN_SAT_SUB, 2, args[0], args[1]);
+ gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+ break;
+
BUILTIN_VSDQ_I_DI (BINOP, sshl, 0, DEFAULT)
BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0, DEFAULT)
{
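
As a sketch of what the gimple folding added above buys (the example is mine, not from the patch): a NEON saturating operation written with the arm_neon.h intrinsics now reaches the middle end as the target-independent internal function rather than as an opaque target builtin, for example:

    #include <arm_neon.h>

    /* vqaddq_u8 expands to __builtin_aarch64_usaddv16qi_uuu, which the
       folding above rewrites into an IFN_SAT_ADD call at gimple level.  */
    uint8x16_t
    sat_add_u8x16 (uint8x16_t a, uint8x16_t b)
    {
      return vqaddq_u8 (a, b);
    }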
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 286272a..6cc45b1 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -71,10 +71,10 @@
BUILTIN_VSDQ_I (BINOP, sqrshl, 0, DEFAULT)
BUILTIN_VSDQ_I (BINOP_UUS, uqrshl, 0, DEFAULT)
/* Implemented by aarch64_<su_optab><optab><mode>. */
- BUILTIN_VSDQ_I (BINOP, sqadd, 0, DEFAULT)
- BUILTIN_VSDQ_I (BINOPU, uqadd, 0, DEFAULT)
- BUILTIN_VSDQ_I (BINOP, sqsub, 0, DEFAULT)
- BUILTIN_VSDQ_I (BINOPU, uqsub, 0, DEFAULT)
+ BUILTIN_VSDQ_I (BINOP, ssadd, 3, DEFAULT)
+ BUILTIN_VSDQ_I (BINOPU, usadd, 3, DEFAULT)
+ BUILTIN_VSDQ_I (BINOP, sssub, 3, DEFAULT)
+ BUILTIN_VSDQ_I (BINOPU, ussub, 3, DEFAULT)
/* Implemented by aarch64_<sur>qadd<mode>. */
BUILTIN_VSDQ_I (BINOP_SSU, suqadd, 0, DEFAULT)
BUILTIN_VSDQ_I (BINOP_UUS, usqadd, 0, DEFAULT)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index eeb626f..e2afe87 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -5162,15 +5162,214 @@
)
;; <su>q<addsub>
-(define_insn "aarch64_<su_optab>q<addsub><mode><vczle><vczbe>"
- [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
- (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:VSDQ_I 2 "register_operand" "w")))]
+(define_insn "<su_optab>s<addsub><mode>3<vczle><vczbe>"
+ [(set (match_operand:VSDQ_I_QI_HI 0 "register_operand" "=w")
+ (BINQOPS:VSDQ_I_QI_HI
+ (match_operand:VSDQ_I_QI_HI 1 "register_operand" "w")
+ (match_operand:VSDQ_I_QI_HI 2 "register_operand" "w")))]
"TARGET_SIMD"
"<su_optab>q<addsub>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
[(set_attr "type" "neon_q<addsub><q>")]
)
+(define_expand "<su_optab>s<addsub><mode>3"
+ [(parallel
+ [(set (match_operand:GPI 0 "register_operand")
+ (SBINQOPS:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_operand")))
+ (clobber (scratch:GPI))
+ (clobber (reg:CC CC_REGNUM))])]
+)
+
+;; Introducing a temporary GP reg allows signed saturating arithmetic with GPR
+;; operands to be calculated without the use of costly transfers to and from FP
+;; registers. For example, saturating addition usually uses three FMOVs:
+;;
+;; fmov d0, x0
+;; fmov d1, x1
+;; sqadd d0, d0, d1
+;; fmov x0, d0
+;;
+;; Using a temporary register results in three cheaper instructions being used
+;; in place of the three FMOVs, which calculate the saturating limit accounting
+;; for the signedness of operand2:
+;;
+;; asr x2, x1, 63
+;; adds x0, x0, x1
+;; eor x2, x2, 0x8000000000000000
+;; csinv x0, x0, x2, vc
+;;
+;; If operand2 is a constant value, the temporary register can be used to store
+;; the saturating limit without the need for asr, xor to calculate said limit.
+
+(define_insn_and_split "aarch64_<su_optab>s<addsub><mode>3<vczle><vczbe>"
+ [(set (match_operand:GPI 0 "register_operand")
+ (SBINQOPS:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_operand")))
+ (clobber (match_scratch:GPI 3))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ {@ [ cons: =0, 1 , 2 , =3 ; attrs: type , arch , length ]
+ [ w , w , w , X ; neon_q<addsub><q> , simd , 4 ] <su_optab>q<addsub>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+ [ r , r , JIr , &r ; * , * , 8 ] #
+ }
+ "&& reload_completed && GP_REGNUM_P (REGNO (operands[0]))"
+ [(set (match_dup 0)
+ (if_then_else:GPI
+ (match_dup 4)
+ (match_dup 5)
+ (match_dup 6)))]
+ {
+ if (REG_P (operands[2]))
+ {
+ rtx shift_constant = gen_int_mode (GET_MODE_BITSIZE (<MODE>mode) - 1,
+ <MODE>mode);
+ auto limit = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (<MODE>mode) - 1);
+ rtx limit_constant = gen_int_mode (limit, <MODE>mode);
+ emit_insn (gen_ashr<mode>3 (operands[3], operands[2], shift_constant));
+ emit_insn (gen_xor<mode>3 (operands[3], operands[3], limit_constant));
+
+ switch (<CODE>)
+ {
+ case SS_MINUS:
+ emit_insn (gen_sub<mode>3_compare1 (operands[0], operands[1],
+ operands[2]));
+ break;
+ case SS_PLUS:
+ emit_insn (gen_add<mode>3_compare0 (operands[0], operands[1],
+ operands[2]));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ rtx ccin = gen_rtx_REG (E_CC_Vmode, CC_REGNUM);
+ switch (<CODE>)
+ {
+ case SS_PLUS:
+ operands[4] = gen_rtx_NE (<MODE>mode, ccin, const0_rtx);
+ operands[5] = gen_rtx_NOT (<MODE>mode, operands[3]);
+ operands[6] = operands[0];
+ break;
+ case SS_MINUS:
+ operands[4] = gen_rtx_EQ (<MODE>mode, ccin, const0_rtx);
+ operands[5] = operands[0];
+ operands[6] = operands[3];
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ auto imm = INTVAL (operands[2]);
+ rtx neg_imm = gen_int_mode (-imm, <MODE>mode);
+ wide_int limit;
+
+ switch (<CODE>)
+ {
+ case SS_MINUS:
+ emit_insn (gen_sub<mode>3_compare1_imm (operands[0], operands[1],
+ operands[2], neg_imm));
+ limit = imm >= 0 ? wi::min_value (<MODE>mode, SIGNED)
+ : wi::max_value (<MODE>mode, SIGNED);
+ break;
+ case SS_PLUS:
+ emit_insn (gen_sub<mode>3_compare1_imm (operands[0], operands[1],
+ neg_imm, operands[2]));
+ limit = imm >= 0 ? wi::max_value (<MODE>mode, SIGNED)
+ : wi::min_value (<MODE>mode, SIGNED);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ rtx sat_limit = immed_wide_int_const (limit, <MODE>mode);
+ emit_insn (gen_rtx_SET (operands[3], sat_limit));
+
+ rtx ccin = gen_rtx_REG (E_CC_Vmode, CC_REGNUM);
+ operands[4] = gen_rtx_EQ (<MODE>mode, ccin, const0_rtx);
+ operands[5] = operands[0];
+ operands[6] = operands[3];
+ }
+ }
+)
+
+;; Unsigned saturating arithmetic with GPR operands can be optimised similarly
+;; to the signed case, albeit without the need for a temporary register as the
+;; saturating limit can be inferred from the <addsub> code. This applies only
+;; to SImode and DImode.
+
+(define_insn_and_split "<su_optab>s<addsub><mode>3<vczle><vczbe>"
+ [(set (match_operand:GPI 0 "register_operand")
+ (UBINQOPS:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:GPI 2 "aarch64_plus_operand")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ {@ [ cons: =0, 1 , 2 ; attrs: type , arch , length ]
+ [ w , w , w ; neon_q<addsub><q> , simd , 4 ] <su_optab>q<addsub>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+ [ r , r , JIr ; * , * , 8 ] #
+ }
+ "&& reload_completed && GP_REGNUM_P (REGNO (operands[0]))"
+ [(set (match_dup 0)
+ (if_then_else:GPI
+ (match_dup 3)
+ (match_dup 0)
+ (match_dup 4)))]
+ {
+
+ if (REG_P (operands[2]))
+ {
+ switch (<CODE>)
+ {
+ case US_MINUS:
+ emit_insn (gen_sub<mode>3_compare1 (operands[0], operands[1],
+ operands[2]));
+ break;
+ case US_PLUS:
+ emit_insn (gen_add<mode>3_compare0 (operands[0], operands[1],
+ operands[2]));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ auto imm = UINTVAL (operands[2]);
+ rtx neg_imm = gen_int_mode (-imm, <MODE>mode);
+ switch (<CODE>)
+ {
+ case US_MINUS:
+ emit_insn (gen_sub<mode>3_compare1_imm (operands[0], operands[1],
+ operands[2], neg_imm));
+ break;
+ case US_PLUS:
+ emit_insn (gen_sub<mode>3_compare1_imm (operands[0], operands[1],
+ neg_imm, operands[2]));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ rtx ccin = gen_rtx_REG (CCmode, CC_REGNUM);
+ switch (<CODE>)
+ {
+ case US_PLUS:
+ operands[3] = gen_rtx_LTU (<MODE>mode, ccin, const0_rtx);
+ operands[4] = gen_int_mode (-1, <MODE>mode);
+ break;
+ case US_MINUS:
+ operands[3] = gen_rtx_GEU (<MODE>mode, ccin, const0_rtx);
+ operands[4] = const0_rtx;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+)
+
;; suqadd and usqadd
(define_insn "aarch64_<sur>qadd<mode><vczle><vczbe>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 33594cb..4899ace 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -1864,35 +1864,35 @@ __extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
+ return (int8x8_t) __builtin_aarch64_ssaddv8qi (__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
+ return (int16x4_t) __builtin_aarch64_ssaddv4hi (__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
+ return (int32x2_t) __builtin_aarch64_ssaddv2si (__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s64 (int64x1_t __a, int64x1_t __b)
{
- return (int64x1_t) {__builtin_aarch64_sqadddi (__a[0], __b[0])};
+ return (int64x1_t) {__builtin_aarch64_ssadddi (__a[0], __b[0])};
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return __builtin_aarch64_uqaddv8qi_uuu (__a, __b);
+ return __builtin_aarch64_usaddv8qi_uuu (__a, __b);
}
__extension__ extern __inline int8x8_t
@@ -2151,189 +2151,189 @@ __extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return __builtin_aarch64_uqaddv4hi_uuu (__a, __b);
+ return __builtin_aarch64_usaddv4hi_uuu (__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return __builtin_aarch64_uqaddv2si_uuu (__a, __b);
+ return __builtin_aarch64_usaddv2si_uuu (__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__builtin_aarch64_uqadddi_uuu (__a[0], __b[0])};
+ return (uint64x1_t) {__builtin_aarch64_usadddi_uuu (__a[0], __b[0])};
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
+ return (int8x16_t) __builtin_aarch64_ssaddv16qi (__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
+ return (int16x8_t) __builtin_aarch64_ssaddv8hi (__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
+ return (int32x4_t) __builtin_aarch64_ssaddv4si (__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s64 (int64x2_t __a, int64x2_t __b)
{
- return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
+ return (int64x2_t) __builtin_aarch64_ssaddv2di (__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return __builtin_aarch64_uqaddv16qi_uuu (__a, __b);
+ return __builtin_aarch64_usaddv16qi_uuu (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return __builtin_aarch64_uqaddv8hi_uuu (__a, __b);
+ return __builtin_aarch64_usaddv8hi_uuu (__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return __builtin_aarch64_uqaddv4si_uuu (__a, __b);
+ return __builtin_aarch64_usaddv4si_uuu (__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
- return __builtin_aarch64_uqaddv2di_uuu (__a, __b);
+ return __builtin_aarch64_usaddv2di_uuu (__a, __b);
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
+ return (int8x8_t) __builtin_aarch64_sssubv8qi (__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
+ return (int16x4_t) __builtin_aarch64_sssubv4hi (__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
+ return (int32x2_t) __builtin_aarch64_sssubv2si (__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s64 (int64x1_t __a, int64x1_t __b)
{
- return (int64x1_t) {__builtin_aarch64_sqsubdi (__a[0], __b[0])};
+ return (int64x1_t) {__builtin_aarch64_sssubdi (__a[0], __b[0])};
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return __builtin_aarch64_uqsubv8qi_uuu (__a, __b);
+ return __builtin_aarch64_ussubv8qi_uuu (__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return __builtin_aarch64_uqsubv4hi_uuu (__a, __b);
+ return __builtin_aarch64_ussubv4hi_uuu (__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return __builtin_aarch64_uqsubv2si_uuu (__a, __b);
+ return __builtin_aarch64_ussubv2si_uuu (__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__builtin_aarch64_uqsubdi_uuu (__a[0], __b[0])};
+ return (uint64x1_t) {__builtin_aarch64_ussubdi_uuu (__a[0], __b[0])};
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
+ return (int8x16_t) __builtin_aarch64_sssubv16qi (__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
+ return (int16x8_t) __builtin_aarch64_sssubv8hi (__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
+ return (int32x4_t) __builtin_aarch64_sssubv4si (__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s64 (int64x2_t __a, int64x2_t __b)
{
- return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
+ return (int64x2_t) __builtin_aarch64_sssubv2di (__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return __builtin_aarch64_uqsubv16qi_uuu (__a, __b);
+ return __builtin_aarch64_ussubv16qi_uuu (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return __builtin_aarch64_uqsubv8hi_uuu (__a, __b);
+ return __builtin_aarch64_ussubv8hi_uuu (__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return __builtin_aarch64_uqsubv4si_uuu (__a, __b);
+ return __builtin_aarch64_ussubv4si_uuu (__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
- return __builtin_aarch64_uqsubv2di_uuu (__a, __b);
+ return __builtin_aarch64_ussubv2di_uuu (__a, __b);
}
__extension__ extern __inline int8x8_t
@@ -17543,56 +17543,56 @@ __extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddb_s8 (int8_t __a, int8_t __b)
{
- return (int8_t) __builtin_aarch64_sqaddqi (__a, __b);
+ return (int8_t) __builtin_aarch64_ssaddqi (__a, __b);
}
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddh_s16 (int16_t __a, int16_t __b)
{
- return (int16_t) __builtin_aarch64_sqaddhi (__a, __b);
+ return (int16_t) __builtin_aarch64_ssaddhi (__a, __b);
}
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadds_s32 (int32_t __a, int32_t __b)
{
- return (int32_t) __builtin_aarch64_sqaddsi (__a, __b);
+ return (int32_t) __builtin_aarch64_ssaddsi (__a, __b);
}
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddd_s64 (int64_t __a, int64_t __b)
{
- return __builtin_aarch64_sqadddi (__a, __b);
+ return __builtin_aarch64_ssadddi (__a, __b);
}
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8_t) __builtin_aarch64_uqaddqi_uuu (__a, __b);
+ return (uint8_t) __builtin_aarch64_usaddqi_uuu (__a, __b);
}
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16_t) __builtin_aarch64_uqaddhi_uuu (__a, __b);
+ return (uint16_t) __builtin_aarch64_usaddhi_uuu (__a, __b);
}
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadds_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32_t) __builtin_aarch64_uqaddsi_uuu (__a, __b);
+ return (uint32_t) __builtin_aarch64_usaddsi_uuu (__a, __b);
}
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddd_u64 (uint64_t __a, uint64_t __b)
{
- return __builtin_aarch64_uqadddi_uuu (__a, __b);
+ return __builtin_aarch64_usadddi_uuu (__a, __b);
}
/* vqdmlal */
@@ -19242,56 +19242,56 @@ __extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubb_s8 (int8_t __a, int8_t __b)
{
- return (int8_t) __builtin_aarch64_sqsubqi (__a, __b);
+ return (int8_t) __builtin_aarch64_sssubqi (__a, __b);
}
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubh_s16 (int16_t __a, int16_t __b)
{
- return (int16_t) __builtin_aarch64_sqsubhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sssubhi (__a, __b);
}
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubs_s32 (int32_t __a, int32_t __b)
{
- return (int32_t) __builtin_aarch64_sqsubsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sssubsi (__a, __b);
}
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubd_s64 (int64_t __a, int64_t __b)
{
- return __builtin_aarch64_sqsubdi (__a, __b);
+ return __builtin_aarch64_sssubdi (__a, __b);
}
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8_t) __builtin_aarch64_uqsubqi_uuu (__a, __b);
+ return (uint8_t) __builtin_aarch64_ussubqi_uuu (__a, __b);
}
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16_t) __builtin_aarch64_uqsubhi_uuu (__a, __b);
+ return (uint16_t) __builtin_aarch64_ussubhi_uuu (__a, __b);
}
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubs_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32_t) __builtin_aarch64_uqsubsi_uuu (__a, __b);
+ return (uint32_t) __builtin_aarch64_ussubsi_uuu (__a, __b);
}
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubd_u64 (uint64_t __a, uint64_t __b)
{
- return __builtin_aarch64_uqsubdi_uuu (__a, __b);
+ return __builtin_aarch64_ussubdi_uuu (__a, __b);
}
/* vqtbl2 */
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index ff0f34d..2f7aa48 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -95,6 +95,10 @@
;; integer modes; 64-bit scalar integer mode.
(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
+;; Advanced SIMD and scalar, 64 & 128-bit container; 8 and 16-bit scalar
+;; integer modes.
+(define_mode_iterator VSDQ_I_QI_HI [VDQ_I HI QI])
+
;; Double vector modes.
(define_mode_iterator VD [V8QI V4HI V4HF V2SI V2SF V4BF])