author     Andrea Corallo <andrea.corallo@arm.com>   2023-09-19 15:12:08 +0200
committer  Andrea Corallo <andrea.corallo@arm.com>   2023-10-03 10:39:05 +0200
commit     3ca09d684e496240a87c0327687e2898060c2363 (patch)
tree       b006455c38c08a7338bdb8b2df0b9e691a72b235 /gcc/config/aarch64/aarch64-sve.md
parent     9d31045b21324166c3997d603961d99e3c4c357d (diff)
aarch64: Convert aarch64 multi-choice patterns to new syntax

Hi all,

this patch converts a number of multi-choice patterns within the aarch64 backend to the new compact syntax. The list of the converted patterns is in the ChangeLog.

For completeness, here follows the list of multi-choice patterns that my parser rejected for conversion; they typically have some C code as asm output and require some manual intervention: aarch64_simd_vec_set<mode>, aarch64_get_lane<mode>, aarch64_cm<optab>di, aarch64_cm<optab>di, aarch64_cmtstdi, *aarch64_movv8di, *aarch64_be_mov<mode>, *aarch64_be_movci, *aarch64_be_mov<mode>, *aarch64_be_movxi, *aarch64_sve_mov<mode>_le, *aarch64_sve_mov<mode>_be, @aarch64_pred_mov<mode>, @aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>, @aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>, *aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_sxtw, *aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_uxtw, @aarch64_vec_duplicate_vq<mode>_le, *vec_extract<mode><Vel>_0, *vec_extract<mode><Vel>_v128, *cmp<cmp_op><mode>_and, *fcm<cmp_op><mode>_and_combine, @aarch64_sve_ext<mode>, @aarch64_sve2_<su>aba<mode>, *sibcall_insn, *sibcall_value_insn, *xor_one_cmpl<mode>3, *insv_reg<mode>_<SUBDI_BITS>, *aarch64_bfi<GPI:mode><ALLX:mode>_<SUBDI_BITS>, *aarch64_bfidi<ALLX:mode>_subreg_<SUBDI_BITS>, *aarch64_bfxil<mode>, *aarch64_bfxilsi_uxtw, *aarch64_<su_optab>cvtf<fcvt_target><GPF:mode>2_mult, atomic_store<mode>.

Bootstrapped and regression-tested on aarch64-unknown-linux-gnu. I also analysed tmp-mddump.md (from 'make mddump') and could not find any effective differences. Okay for trunk?

Best regards,
Andrea

gcc/ChangeLog:

* config/aarch64/aarch64.md (@ccmp<CC_ONLY:mode><GPI:mode>) (@ccmp<CC_ONLY:mode><GPI:mode>_rev, *call_insn, *call_value_insn) (*mov<mode>_aarch64, load_pair_sw_<SX:mode><SX2:mode>) (load_pair_dw_<DX:mode><DX2:mode>) (store_pair_sw_<SX:mode><SX2:mode>) (store_pair_dw_<DX:mode><DX2:mode>, *extendsidi2_aarch64) (*zero_extendsidi2_aarch64, *load_pair_zero_extendsidi2_aarch64) (*extend<SHORT:mode><GPI:mode>2_aarch64) (*zero_extend<SHORT:mode><GPI:mode>2_aarch64) (*extendqihi2_aarch64, *zero_extendqihi2_aarch64) (*add<mode>3_aarch64, *addsi3_aarch64_uxtw, *add<mode>3_poly_1) (add<mode>3_compare0, *addsi3_compare0_uxtw) (*add<mode>3_compareC_cconly, add<mode>3_compareC) (*add<mode>3_compareV_cconly_imm, add<mode>3_compareV_imm) (*add<mode>3nr_compare0, subdi3, subv<GPI:mode>_imm) (*cmpv<GPI:mode>_insn, sub<mode>3_compare1_imm, neg<mode>2) (cmp<mode>, fcmp<mode>, fcmpe<mode>, *cmov<mode>_insn) (*cmovsi_insn_uxtw, <optab><mode>3, *<optab>si3_uxtw) (*and<mode>3_compare0, *andsi3_compare0_uxtw, one_cmpl<mode>2) (*<NLOGICAL:optab>_one_cmpl<mode>3, *and<mode>3nr_compare0) (*aarch64_ashl_sisd_or_int_<mode>3) (*aarch64_lshr_sisd_or_int_<mode>3) (*aarch64_ashr_sisd_or_int_<mode>3, *ror<mode>3_insn) (*<optab>si3_insn_uxtw, <optab>_trunc<fcvt_target><GPI:mode>2) (<optab><fcvt_target><GPF:mode>2) (<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3) (<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3) (*aarch64_<optab><mode>3_cssc, copysign<GPF:mode>3_insn): Update to new syntax.
* config/aarch64/aarch64-sve2.md (@aarch64_scatter_stnt<mode>) (@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>) (*aarch64_mul_unpredicated_<mode>) (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>_2) (*cond_<sve_int_op><mode>_3, *cond_<sve_int_op><mode>_any) (*cond_<sve_int_op><mode>_z, @aarch64_pred_<sve_int_op><mode>) (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_3) (*cond_<sve_int_op><mode>_any, @aarch64_sve_<sve_int_op><mode>) (@aarch64_sve_<sve_int_op>_lane_<mode>) (@aarch64_sve_add_mul_lane_<mode>) (@aarch64_sve_sub_mul_lane_<mode>, @aarch64_sve2_xar<mode>) (*aarch64_sve2_bcax<mode>, @aarch64_sve2_eor3<mode>) (*aarch64_sve2_nor<mode>, *aarch64_sve2_nand<mode>) (*aarch64_sve2_bsl<mode>, *aarch64_sve2_nbsl<mode>) (*aarch64_sve2_bsl1n<mode>, *aarch64_sve2_bsl2n<mode>) (*aarch64_sve2_sra<mode>, @aarch64_sve_add_<sve_int_op><mode>) (*aarch64_sve2_<su>aba<mode>, @aarch64_sve_add_<sve_int_op><mode>) (@aarch64_sve_add_<sve_int_op>_lane_<mode>) (@aarch64_sve_qadd_<sve_int_op><mode>) (@aarch64_sve_qadd_<sve_int_op>_lane_<mode>) (@aarch64_sve_sub_<sve_int_op><mode>) (@aarch64_sve_sub_<sve_int_op>_lane_<mode>) (@aarch64_sve_qsub_<sve_int_op><mode>) (@aarch64_sve_qsub_<sve_int_op>_lane_<mode>) (@aarch64_sve_<sve_fp_op><mode>, @aarch64_<sve_fp_op>_lane_<mode>) (@aarch64_pred_<sve_int_op><mode>) (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_int_op><mode>_2) (*cond_<sve_int_op><mode>_z, @aarch64_sve_<optab><mode>) (@aarch64_<optab>_lane_<mode>, @aarch64_sve_<optab><mode>) (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<sve_fp_op><mode>) (*cond_<sve_fp_op><mode>_any_relaxed) (*cond_<sve_fp_op><mode>_any_strict) (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>) (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_fp_op><mode>) (*cond_<sve_fp_op><mode>_strict): Update to new syntax. 
* config/aarch64/aarch64-sve.md (*aarch64_sve_mov<mode>_ldr_str) (*aarch64_sve_mov<mode>_no_ldr_str, @aarch64_pred_mov<mode>) (*aarch64_sve_mov<mode>, aarch64_wrffr) (mask_scatter_store<mode><v_int_container>) (*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked) (*mask_scatter_store<mode><v_int_container>_sxtw) (*mask_scatter_store<mode><v_int_container>_uxtw) (@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>) (@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>) (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw) (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw) (*vec_duplicate<mode>_reg, vec_shl_insert_<mode>) (vec_series<mode>, @extract_<last_op>_<mode>) (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2) (*cond_<optab><mode>_any, @aarch64_pred_<optab><mode>) (@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>) (@cond_<optab><mode>) (*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2) (@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>) (@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>) (*cond_uxt<mode>_2, *cond_uxt<mode>_any, *cnot<mode>) (*cond_cnot<mode>_2, *cond_cnot<mode>_any) (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed) (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed) (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_2, *cond_<optab><mode>_3) (*cond_<optab><mode>_any, add<mode>3, sub<mode>3) (@aarch64_pred_<su>abd<mode>, *aarch64_cond_<su>abd<mode>_2) (*aarch64_cond_<su>abd<mode>_3, *aarch64_cond_<su>abd<mode>_any) (@aarch64_sve_<optab><mode>, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_2, *cond_<optab><mode>_z) (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2) (*cond_<optab><mode>_3, *cond_<optab><mode>_any, <optab><mode>3) (*cond_bic<mode>_2, *cond_bic<mode>_any) (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_const) (*cond_<optab><mode>_any_const, *cond_<sve_int_op><mode>_m) (*cond_<sve_int_op><mode>_z, *sdiv_pow2<mode>3) (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_any) (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed) (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed) (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict) (*cond_<optab><mode>_2_const_relaxed) (*cond_<optab><mode>_2_const_strict) (*cond_<optab><mode>_3_relaxed, *cond_<optab><mode>_3_strict) (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict) (*cond_<optab><mode>_any_const_relaxed) (*cond_<optab><mode>_any_const_strict) (@aarch64_pred_<optab><mode>, *cond_add<mode>_2_const_relaxed) (*cond_add<mode>_2_const_strict) (*cond_add<mode>_any_const_relaxed) (*cond_add<mode>_any_const_strict, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict) (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict) (@aarch64_pred_<optab><mode>, *cond_sub<mode>_3_const_relaxed) (*cond_sub<mode>_3_const_strict, *cond_sub<mode>_const_relaxed) (*cond_sub<mode>_const_strict, *aarch64_pred_abd<mode>_relaxed) (*aarch64_pred_abd<mode>_strict) (*aarch64_cond_abd<mode>_2_relaxed) (*aarch64_cond_abd<mode>_2_strict) (*aarch64_cond_abd<mode>_3_relaxed) (*aarch64_cond_abd<mode>_3_strict) (*aarch64_cond_abd<mode>_any_relaxed) (*aarch64_cond_abd<mode>_any_strict, @aarch64_pred_<optab><mode>) (@aarch64_pred_fma<mode>, *cond_fma<mode>_2, *cond_fma<mode>_4) (*cond_fma<mode>_any, @aarch64_pred_fnma<mode>) (*cond_fnma<mode>_2, 
*cond_fnma<mode>_4, *cond_fnma<mode>_any) (<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>) (@<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>) (@aarch64_sve_add_<optab><vsi2qi>, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict) (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict) (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict) (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<optab><mode>) (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict) (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict) (@aarch64_<optab>_lane_<mode>, @aarch64_sve_tmad<mode>) (@aarch64_sve_<sve_fp_op>vnx4sf) (@aarch64_sve_<sve_fp_op>_lanevnx4sf) (@aarch64_sve_<sve_fp_op><mode>, *vcond_mask_<mode><vpred>) (@aarch64_sel_dup<mode>, @aarch64_pred_cmp<cmp_op><mode>) (*cmp<cmp_op><mode>_cc, *cmp<cmp_op><mode>_ptest) (@aarch64_pred_fcm<cmp_op><mode>, @fold_extract_<last_op>_<mode>) (@aarch64_fold_extract_vector_<last_op>_<mode>) (@aarch64_sve_splice<mode>) (@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>) (@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>) (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed) (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict) (*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>) (@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>) (@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>) (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed) (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict) (*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>) (@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>) (*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>) (@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>) (*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>) (@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>) (*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>) (@aarch64_brk<brk_op>, *aarch64_sve_<inc_dec><mode>_cntp): Update to new syntax.

* config/aarch64/aarch64-simd.md (aarch64_simd_dup<mode>) (load_pair<DREG:mode><DREG2:mode>) (vec_store_pair<DREG:mode><DREG2:mode>, aarch64_simd_stp<mode>) (aarch64_simd_mov_from_<mode>low) (aarch64_simd_mov_from_<mode>high, and<mode>3<vczle><vczbe>) (ior<mode>3<vczle><vczbe>, aarch64_simd_ashr<mode><vczle><vczbe>) (aarch64_simd_bsl<mode>_internal<vczle><vczbe>) (*aarch64_simd_bsl<mode>_alt<vczle><vczbe>) (aarch64_simd_bsldi_internal, aarch64_simd_bsldi_alt) (store_pair_lanes<mode>, *aarch64_combine_internal<mode>) (*aarch64_combine_internal_be<mode>, *aarch64_combinez<mode>) (*aarch64_combinez_be<mode>) (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_cm<optab>di) (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_mov<mode>) (*aarch64_be_mov<mode>, *aarch64_be_movoi): Update to new syntax.
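To make the conversion concrete, here is the aarch64_wrffr pattern from this patch in both forms (taken verbatim from the hunk further down). In the old syntax the per-alternative constraints live on the match_operands and the output templates sit in a leading-"@" string, matched up by position:

(define_insn "aarch64_wrffr"
  [(set (reg:VNx16BI FFR_REGNUM)
	(match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one" "Dm, Upa"))
   (set (reg:VNx16BI FFRT_REGNUM)
	(unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
  "TARGET_SVE"
  "@
   setffr
   wrffr\t%0.b"
)

In the compact syntax the constraints move off the operands and into an {@ [ cons: ... ] ... } table whose rows pair each alternative's constraints with its output template:

(define_insn "aarch64_wrffr"
  [(set (reg:VNx16BI FFR_REGNUM)
	(match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one"))
   (set (reg:VNx16BI FFRT_REGNUM)
	(unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
  "TARGET_SVE"
  {@ [ cons: 0 ]
     [ Dm       ] setffr
     [ Upa      ] wrffr\t%0.b
  }
)

Rows can also carry per-alternative attributes (the "; attrs: movprfx" and "; attrs: length" columns in the hunks below), replacing the trailing set_attr lists of the old syntax.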
Diffstat (limited to 'gcc/config/aarch64/aarch64-sve.md')
-rw-r--r--  gcc/config/aarch64/aarch64-sve.md | 2973
 1 file changed, 1514 insertions(+), 1459 deletions(-)
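One further recurring change is worth noting before the diff itself: alternatives whose output is computed by C code, written in the old syntax as a "* return ...;" escape inside the "@" template, become "<<" rows in the compact table. The first hunk below shows the pair:

   "@
    ldr\t%0, %1
    str\t%1, %0
    mov\t%0.d, %1.d
    * return aarch64_output_sve_mov_immediate (operands[1]);"

   {@ [ cons: =0 , 1   ]
      [ w        , Utr ] ldr\t%0, %1
      [ Utr      , w   ] str\t%1, %0
      [ w        , w   ] mov\t%0.d, %1.d
      [ w        , Dn  ] << aarch64_output_sve_mov_immediate (operands[1]);
   }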
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index b223e7d..5a652d8 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -687,33 +687,35 @@
;; and after RA; before RA we want the predicated load and store patterns to
;; be used instead.
(define_insn "*aarch64_sve_mov<mode>_ldr_str"
- [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
- (match_operand:SVE_FULL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
+ [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand")
+ (match_operand:SVE_FULL 1 "aarch64_sve_general_operand"))]
"TARGET_SVE
&& (<MODE>mode == VNx16QImode || !BYTES_BIG_ENDIAN)
&& ((lra_in_progress || reload_completed)
|| (register_operand (operands[0], <MODE>mode)
&& nonmemory_operand (operands[1], <MODE>mode)))"
- "@
- ldr\t%0, %1
- str\t%1, %0
- mov\t%0.d, %1.d
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ w , Utr ] ldr\t%0, %1
+ [ Utr , w ] str\t%1, %0
+ [ w , w ] mov\t%0.d, %1.d
+ [ w , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Unpredicated moves that cannot use LDR and STR, i.e. partial vectors
;; or vectors for which little-endian ordering isn't acceptable. Memory
;; accesses require secondary reloads.
(define_insn "*aarch64_sve_mov<mode>_no_ldr_str"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
- (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand" "w, Dn"))]
+ [(set (match_operand:SVE_ALL 0 "register_operand")
+ (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand"))]
"TARGET_SVE
&& <MODE>mode != VNx16QImode
&& (BYTES_BIG_ENDIAN
|| maybe_ne (BYTES_PER_SVE_VECTOR, GET_MODE_SIZE (<MODE>mode)))"
- "@
- mov\t%0.d, %1.d
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ w , w ] mov\t%0.d, %1.d
+ [ w , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Handle memory reloads for modes that can't use LDR and STR. We use
@@ -743,18 +745,19 @@
;; Note that this pattern is generated directly by aarch64_emit_sve_pred_move,
;; so changes to this pattern will need changes there as well.
(define_insn_and_split "@aarch64_pred_mov<mode>"
- [(set (match_operand:SVE_ALL 0 "nonimmediate_operand" "=w, w, m")
+ [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (match_operand:SVE_ALL 2 "nonimmediate_operand" "w, m, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_ALL 2 "nonimmediate_operand")]
UNSPEC_PRED_X))]
"TARGET_SVE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- #
- ld1<Vesize>\t%0.<Vctype>, %1/z, %2
- st1<Vesize>\t%2.<Vctype>, %1, %0"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , Upl , w ] #
+ [ w , Upl , m ] ld1<Vesize>\t%0.<Vctype>, %1/z, %2
+ [ m , Upl , w ] st1<Vesize>\t%2.<Vctype>, %1, %0
+ }
"&& register_operand (operands[0], <MODE>mode)
&& register_operand (operands[2], <MODE>mode)"
[(set (match_dup 0) (match_dup 2))]
@@ -949,16 +952,17 @@
)
(define_insn "*aarch64_sve_mov<mode>"
- [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa")
- (match_operand:PRED_ALL 1 "aarch64_mov_operand" "Upa, Upa, m, Dn"))]
+ [(set (match_operand:PRED_ALL 0 "nonimmediate_operand")
+ (match_operand:PRED_ALL 1 "aarch64_mov_operand"))]
"TARGET_SVE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
- "@
- mov\t%0.b, %1.b
- str\t%1, %0
- ldr\t%0, %1
- * return aarch64_output_sve_mov_immediate (operands[1]);"
+ {@ [ cons: =0 , 1 ]
+ [ Upa , Upa ] mov\t%0.b, %1.b
+ [ m , Upa ] str\t%1, %0
+ [ Upa , m ] ldr\t%0, %1
+ [ Upa , Dn ] << aarch64_output_sve_mov_immediate (operands[1]);
+ }
)
;; Match PTRUES Pn.B when both the predicate and flags are useful.
@@ -1079,13 +1083,14 @@
;; Write to the FFR and start a new FFRT scheduling region.
(define_insn "aarch64_wrffr"
[(set (reg:VNx16BI FFR_REGNUM)
- (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one" "Dm, Upa"))
+ (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one"))
(set (reg:VNx16BI FFRT_REGNUM)
(unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
"TARGET_SVE"
- "@
- setffr
- wrffr\t%0.b"
+ {@ [ cons: 0 ]
+ [ Dm ] setffr
+ [ Upa ] wrffr\t%0.b
+ }
)
;; [L2 in the block comment above about FFR handling]
@@ -2331,21 +2336,22 @@
(define_insn "mask_scatter_store<mode><v_int_container>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk")
- (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
- (match_operand:SVE_4 4 "register_operand" "w, w, w, w, w, w")]
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx4SI 1 "register_operand")
+ (match_operand:DI 2 "const_int_operand")
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_4 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.s, %5, [%1.s]
- st1<Vesize>\t%4.s, %5, [%1.s, #%0]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
- st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 2 , 3 , 4 , 5 ]
+ [ Z , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s]
+ [ vgw , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s, #%0]
+ [ rk , w , Z , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+ [ rk , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+ [ rk , w , Z , i , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+ [ rk , w , Ui1 , i , w , Upl ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+ }
)
;; Predicated scatter stores for 64-bit elements. The value of operand 2
@@ -2353,40 +2359,42 @@
(define_insn "mask_scatter_store<mode><v_int_container>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk")
- (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w, w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%1.d]
- st1<Vesize>\t%4.d, %5, [%1.d, #%0]
- st1<Vesize>\t%4.d, %5, [%0, %1.d]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ Z , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d]
+ [ vgd , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d, #%0]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+ }
)
;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(ANY_EXTEND:VNx2DI
- (match_operand:VNx2SI 1 "register_operand" "w, w"))]
+ (match_operand:VNx2SI 1 "register_operand"))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (<VPRED>mode);
@@ -2398,22 +2406,23 @@
(define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_sxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 1 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (<VPRED>mode);
@@ -2425,19 +2434,20 @@
(define_insn "*mask_scatter_store<mode><v_int_container>_uxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 1 "register_operand" "w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 2 "const_int_operand")
- (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
- (match_operand:SVE_2 4 "register_operand" "w, w")]
+ (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+ (match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
- st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 3 , 4 , 5 ]
+ [ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+ [ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+ }
)
;; -------------------------------------------------------------------------
@@ -2454,22 +2464,23 @@
(define_insn "@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 0 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk")
- (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
+ (match_operand:VNx4SI 1 "register_operand")
+ (match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
(truncate:VNx4_NARROW
- (match_operand:VNx4_WIDE 4 "register_operand" "w, w, w, w, w, w"))]
+ (match_operand:VNx4_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
- st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+ {@ [ cons: 1 , 2 , 4 , 5 ]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
+ [ w , Z , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+ [ w , Z , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+ [ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+ }
)
;; Predicated truncating scatter stores for 64-bit elements. The value of
@@ -2477,43 +2488,45 @@
(define_insn "@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 0 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk")
- (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w, w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+ {@ [ cons: 1 , 4 , 5 ]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
+ [ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+ }
)
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 1 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+ {@ [ cons: 0 , 1 , 4 , 5 ]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+ }
"&& !rtx_equal_p (operands[5], operands[6])"
{
operands[6] = copy_rtx (operands[5]);
@@ -2524,20 +2537,21 @@
(define_insn "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw"
[(set (mem:BLK (scratch))
(unspec:BLK
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 0 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 1 "register_operand" "w, w")
+ (match_operand:VNx2DI 1 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(truncate:VNx2_NARROW
- (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+ (match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
- "@
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
- st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+ {@ [ cons: 0 , 1 , 4 , 5 ]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+ [ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+ }
)
;; =========================================================================
@@ -2587,15 +2601,16 @@
;; the load at the first opportunity in order to allow the PTRUE to be
;; optimized with surrounding code.
(define_insn_and_split "*vec_duplicate<mode>_reg"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
+ (match_operand:<VEL> 1 "aarch64_sve_dup_operand")))
(clobber (match_scratch:VNx16BI 2 "=X, X, Upl"))]
"TARGET_SVE"
- "@
- mov\t%0.<Vetype>, %<vwcore>1
- mov\t%0.<Vetype>, %<Vetype>1
- #"
+ {@ [ cons: =0 , 1 ; attrs: length ]
+ [ w , r ; 4 ] mov\t%0.<Vetype>, %<vwcore>1
+ [ w , w ; 4 ] mov\t%0.<Vetype>, %<Vetype>1
+ [ w , Uty ; 8 ] #
+ }
"&& MEM_P (operands[1])"
[(const_int 0)]
{
@@ -2607,7 +2622,6 @@
CONST0_RTX (<MODE>mode)));
DONE;
}
- [(set_attr "length" "4,4,8")]
)
;; Duplicate an Advanced SIMD vector to fill an SVE vector (LE version).
@@ -2739,18 +2753,18 @@
;; Shift an SVE vector left and insert a scalar into element 0.
(define_insn "vec_shl_insert_<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=?w, w, ??&w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:SVE_FULL 1 "register_operand" "0, 0, w, w")
- (match_operand:<VEL> 2 "aarch64_reg_or_zero" "rZ, w, rZ, w")]
+ [(match_operand:SVE_FULL 1 "register_operand")
+ (match_operand:<VEL> 2 "aarch64_reg_or_zero")]
UNSPEC_INSR))]
"TARGET_SVE"
- "@
- insr\t%0.<Vetype>, %<vwcore>2
- insr\t%0.<Vetype>, %<Vetype>2
- movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
- movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ ?w , 0 , rZ ; * ] insr\t%0.<Vetype>, %<vwcore>2
+ [ w , 0 , w ; * ] insr\t%0.<Vetype>, %<Vetype>2
+ [ ??&w , w , rZ ; yes ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2
+ }
)
;; -------------------------------------------------------------------------
@@ -2761,15 +2775,16 @@
;; -------------------------------------------------------------------------
(define_insn "vec_series<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(vec_series:SVE_I
- (match_operand:<VEL> 1 "aarch64_sve_index_operand" "Usi, r, r")
- (match_operand:<VEL> 2 "aarch64_sve_index_operand" "r, Usi, r")))]
+ (match_operand:<VEL> 1 "aarch64_sve_index_operand")
+ (match_operand:<VEL> 2 "aarch64_sve_index_operand")))]
"TARGET_SVE"
- "@
- index\t%0.<Vctype>, #%1, %<vccore>2
- index\t%0.<Vctype>, %<vccore>1, #%2
- index\t%0.<Vctype>, %<vccore>1, %<vccore>2"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , Usi , r ] index\t%0.<Vctype>, #%1, %<vccore>2
+ [ w , r , Usi ] index\t%0.<Vctype>, %<vccore>1, #%2
+ [ w , r , r ] index\t%0.<Vctype>, %<vccore>1, %<vccore>2
+ }
)
;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
@@ -2968,15 +2983,16 @@
;; Extract the last active element of operand 1 into operand 0.
;; If no elements are active, extract the last inactive element instead.
(define_insn "@extract_<last_op>_<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+ [(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 2 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL 2 "register_operand")]
LAST))]
"TARGET_SVE"
- "@
- last<ab>\t%<vwcore>0, %1, %2.<Vetype>
- last<ab>\t%<Vetype>0, %1, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ ?r , Upl , w ] last<ab>\t%<vwcore>0, %1, %2.<Vetype>
+ [ w , Upl , w ] last<ab>\t%<Vetype>0, %1, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3036,17 +3052,17 @@
;; Integer unary arithmetic predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w"))]
+ (match_operand:SVE_I 2 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated integer unary arithmetic with merging.
@@ -3063,18 +3079,18 @@
;; Predicated integer unary arithmetic, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated integer unary arithmetic, merging with an independent value.
@@ -3085,19 +3101,19 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w"))
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 2 "register_operand"))
+ (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3112,18 +3128,18 @@
;; Predicated integer unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")]
SVE_INT_UNARY)]
UNSPEC_PRED_X))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Another way of expressing the REVB, REVH and REVW patterns, with this
@@ -3131,36 +3147,36 @@
;; of lanes and the data mode decides the granularity of the reversal within
;; each lane.
(define_insn "@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:PRED_HSD 1 "register_operand" "Upl, Upl")
+ [(match_operand:PRED_HSD 1 "register_operand")
(unspec:SVE_ALL
- [(match_operand:SVE_ALL 2 "register_operand" "0, w")]
+ [(match_operand:SVE_ALL 2 "register_operand")]
UNSPEC_REVBHW)]
UNSPEC_PRED_X))]
"TARGET_SVE && <PRED_HSD:elem_bits> > <SVE_ALL:container_bits>"
- "@
- rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
- movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+ }
)
;; Predicated integer unary operations with merging.
(define_insn "@cond_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")]
SVE_INT_UNARY)
- (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3191,53 +3207,53 @@
;; Predicated sign and zero extension from a narrower mode.
(define_insn "*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_HSDI 0 "register_operand")
(unspec:SVE_HSDI
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
(ANY_EXTEND:SVE_HSDI
- (match_operand:SVE_PARTIAL_I 2 "register_operand" "0, w"))]
+ (match_operand:SVE_PARTIAL_I 2 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-sign-extend operations.
(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL_I
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")))]
UNSPEC_PRED_X))]
"TARGET_SVE
&& (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-sign-extend operations with merging.
(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL_I
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")))
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated truncate-and-zero-extend operations, merging with the
@@ -3246,19 +3262,19 @@
;; The canonical form of this operation is an AND of a constant rather
;; than (zero_extend (truncate ...)).
(define_insn "*cond_uxt<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated truncate-and-zero-extend operations, merging with an
@@ -3270,20 +3286,20 @@
;; as early-clobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_uxt<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3338,23 +3354,23 @@
)
(define_insn "*cnot<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
[(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated logical inverse with merging.
@@ -3385,16 +3401,16 @@
;; Predicated logical inverse, merging with the first input.
(define_insn_and_rewrite "*cond_cnot<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
;; Logical inverse of operand 2 (as above).
(unspec:SVE_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
@@ -3403,14 +3419,14 @@
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated logical inverse, merging with an independent value.
@@ -3421,33 +3437,33 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_cnot<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
;; Logical inverse of operand 2 (as above).
(unspec:SVE_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
(match_operand:SVE_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL)
- (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[6])"
- "@
- cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 6 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
;; -------------------------------------------------------------------------
@@ -3512,17 +3528,17 @@
;; Predicated floating-point unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated floating-point unary arithmetic with merging.
@@ -3542,43 +3558,43 @@
;; Predicated floating-point unary arithmetic, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 3)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[3])"
{
operands[3] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; Predicated floating-point unary arithmetic, merging with an independent
@@ -3590,45 +3606,45 @@
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -3767,19 +3783,20 @@
;; and would make the instruction seem less uniform to the register
;; allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_IMM:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand" "<sve_imm_con>, w, <sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- #
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , <sve_imm_con> ; * ] #
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , <sve_imm_con> ; yes ] #
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -3787,7 +3804,6 @@
[(set (match_dup 0)
(SVE_INT_BINARY_IMM:SVE_I (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,yes,yes")]
)
;; Unpredicated binary operations with a constant (post-RA only).
@@ -3820,57 +3836,58 @@
;; Predicated integer operations, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
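
The constraint modifiers survive the conversion unchanged, so the rows above read the same way the old strings did. Roughly, taking the second row as an annotated example:

  [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;...
  ;; ?   slightly disparages this row, so the allocator prefers the
  ;;     tied "0" alternative whenever the input can share %0
  ;; &   early-clobber: %0 is written before the inputs are consumed,
  ;;     so it may not overlap them (MOVPRFX requires this)
  ;; w   FP/SIMD register; Upl restricts the predicate to p0-p7
  ;; movprfx=yes marks the MOVPRFX-prefixed pair for length accounting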
;; Predicated integer operations, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated integer operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -3899,19 +3916,19 @@
;; -------------------------------------------------------------------------
(define_insn "add<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?w, ?w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(plus:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, 0, 0, w, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, vsa, vsn, w")))]
+ (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "aarch64_sve_add_operand")))]
"TARGET_SVE"
- "@
- add\t%0.<Vetype>, %0.<Vetype>, #%D2
- sub\t%0.<Vetype>, %0.<Vetype>, #%N2
- * return aarch64_output_sve_vector_inc_dec (\"%0.<Vetype>\", operands[2]);
- movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
- add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,*,*,yes,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , vsa ; * ] add\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , 0 , vsn ; * ] sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , 0 , vsi ; * ] << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
+ [ ?w , w , vsa ; yes ] movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?w , w , vsn ; yes ] movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , w , w ; * ] add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
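
One alternative of add<mode>3 gets its output from C rather than from a literal template. The old syntax marked that with a leading "*"; the compact syntax uses "<<" followed by a C expression whose return value is the template:

  ;; old:  * return aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
  ;; new:  << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);

Either way the helper inspects operands[2] and returns the appropriate INC/DEC template string.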
;; Merging forms are handled through SVE_INT_BINARY.
@@ -3925,16 +3942,16 @@
;; -------------------------------------------------------------------------
(define_insn "sub<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 1 "aarch64_sve_arith_operand" "w, vsa, vsa")
- (match_operand:SVE_I 2 "register_operand" "w, 0, w")))]
+ (match_operand:SVE_I 1 "aarch64_sve_arith_operand")
+ (match_operand:SVE_I 2 "register_operand")))]
"TARGET_SVE"
- "@
- sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
- subr\t%0.<Vetype>, %0.<Vetype>, #%D1
- movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , w , w ; * ] sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ [ w , vsa , 0 ; * ] subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+ [ ?&w , vsa , w ; yes ] movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+ }
)
;; Merging forms are handled through SVE_INT_BINARY.
@@ -4108,13 +4125,13 @@
;; Predicated integer absolute difference.
(define_insn "@aarch64_pred_<su>abd<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_dup 1)
@@ -4123,10 +4140,10 @@
(match_dup 3))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
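
The "%" in the "%0" constraint, again carried over verbatim from the old string, marks operands 2 and 3 as commutative. That gives the allocator license to swap the two inputs, so the tied alternative still applies when only the second input happens to live in the destination register; this is valid here because the inputs of <su>abd are symmetric. For illustration:

  [ w , Upl , %0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
  ;; the RA may treat this row as if operands 2 and 3 were exchanged,
  ;; so either input may be the one tied to %0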
(define_expand "@aarch64_cond_<su>abd<mode>"
@@ -4156,15 +4173,15 @@
;; Predicated integer absolute difference, merging with the first input.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 4)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 5)
@@ -4175,27 +4192,27 @@
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
{
operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated integer absolute difference, merging with the second input.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 4)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "0, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 5)
@@ -4206,27 +4223,27 @@
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
{
operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Predicated integer absolute difference, merging with an independent value.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
(unspec:SVE_I
[(match_operand 5)
(USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
(unspec:SVE_I
[(match_operand 6)
@@ -4234,17 +4251,18 @@
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (!CONSTANT_P (operands[5]) || !CONSTANT_P (operands[6]))
@@ -4274,32 +4292,32 @@
;; Unpredicated saturating signed addition and subtraction.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(SBINQOPS:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "0, 0, w, w, w")
- (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand" "vsQ, vsS, vsQ, vsS, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand")))]
"TARGET_SVE"
- "@
- <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
- movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
- <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,*,yes,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , vsQ ; * ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , 0 , vsS ; * ] <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ ?&w , w , vsQ ; yes ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?&w , w , vsS ; yes ] movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+ [ w , w , w ; * ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
;; Unpredicated saturating unsigned addition and subtraction.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(UBINQOPS:SVE_FULL_I
- (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand" "vsa, vsa, w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand")))]
"TARGET_SVE"
- "@
- <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
- <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , vsa ; * ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ ?&w , w , vsa ; yes ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+ [ w , w , w ; * ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -4328,19 +4346,19 @@
;; Predicated highpart multiplication.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")]
MUL_HIGHPART)]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated highpart multiplications with merging.
@@ -4364,36 +4382,38 @@
;; Predicated highpart multiplications, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
MUL_HIGHPART)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")])
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
+)
;; Predicated highpart multiplications, merging with zero.
(define_insn "*cond_<optab><mode>_z"
- [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_I
- [(match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
MUL_HIGHPART)
(match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , %0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
[(set_attr "movprfx" "yes")])
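
Where every alternative needs the MOVPRFX, as in *cond_<optab><mode>_z above and the similar _z shift pattern later on, an "attrs: movprfx" column would be uniformly "yes", so the conversion keeps the single constant attribute instead. The two spellings are equivalent:

  ;;   {@ [ cons: ... ; attrs: movprfx ]
  ;;      [ ... ; yes ] movprfx\t...
  ;;      [ ... ; yes ] movprfx\t...
  ;;   }
  ;; versus the shorter form used here:
  ;;   {@ [ cons: ... ] ... }  [(set_attr "movprfx" "yes")]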
;; -------------------------------------------------------------------------
@@ -4423,19 +4443,19 @@
;; Integer division predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w"))]
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer division with merging.
@@ -4453,57 +4473,58 @@
;; Predicated integer division, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer division, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w"))
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated integer division, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY_SD:SVE_FULL_SDI
- (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
+ (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4526,16 +4547,16 @@
;; Unpredicated integer binary logical operations.
(define_insn "<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?w, w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(LOGICAL:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_logical_operand" "vsl, vsl, w")))]
+ (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "aarch64_sve_logical_operand")))]
"TARGET_SVE"
- "@
- <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
- movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
- <logical>\t%0.d, %1.d, %2.d"
- [(set_attr "movprfx" "*,yes,*")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , %0 , vsl ; * ] <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+ [ ?w , w , vsl ; yes ] movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+ [ w , w , w ; * ] <logical>\t%0.d, %1.d, %2.d
+ }
)
;; Merging forms are handled through SVE_INT_BINARY.
@@ -4595,39 +4616,40 @@
;; Predicated integer BIC, merging with the first input.
(define_insn "*cond_bic<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
(not:SVE_I
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 2 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer BIC, merging with an independent value.
(define_insn_and_rewrite "*cond_bic<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(and:SVE_I
(not:SVE_I
- (match_operand:SVE_I 3 "register_operand" "w, w, w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 2 "register_operand"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4697,24 +4719,24 @@
;; likely to gain much and would make the instruction seem less uniform
;; to the register allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- #
- <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , D<lr> ; * ] #
+ [ w , Upl , 0 , w ; * ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& reload_completed
&& !register_operand (operands[3], <MODE>mode)"
[(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,yes")]
)
;; Unpredicated shift operations by a constant (post-RA only).
@@ -4731,36 +4753,37 @@
;; Predicated integer shift, merging with the first input.
(define_insn "*cond_<optab><mode>_2_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
)
;; Predicated integer shift, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4800,36 +4823,38 @@
;; Predicated shifts of narrow elements by 64-bit amounts, merging with
;; the first input.
(define_insn "*cond_<sve_int_op><mode>_m"
- [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
- (match_operand:VNx2DI 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+ (match_operand:VNx2DI 3 "register_operand")]
SVE_SHIFT_WIDE)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
- [(set_attr "movprfx" "*, yes")])
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ }
+)
;; Predicated shifts of narrow elements by 64-bit amounts, merging with zero.
(define_insn "*cond_<sve_int_op><mode>_z"
- [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=&w, &w")
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_BHSI
- [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
- (match_operand:VNx2DI 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+ (match_operand:VNx2DI 3 "register_operand")]
SVE_SHIFT_WIDE)
(match_operand:SVE_FULL_BHSI 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ &w , Upl , 0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+ }
[(set_attr "movprfx" "yes")])
;; -------------------------------------------------------------------------
@@ -4860,19 +4885,20 @@
;; Predicated ASRD.
(define_insn "*sdiv_pow2<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
UNSPEC_ASRD)]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")])
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
+)
;; Predicated shift with merging.
(define_expand "@cond_<sve_int_op><mode>"
@@ -4896,47 +4922,49 @@
;; Predicated shift, merging with the first input.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
[(match_operand 4)
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
SVE_INT_SHIFT_IMM)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")])
+)
;; Predicated shift, merging with an independent value.
(define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_I
[(match_operand 5)
(unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "w, w, w")
+ [(match_operand:SVE_I 2 "register_operand")
(match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
SVE_INT_SHIFT_IMM)]
UNSPEC_PRED_X)
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ &w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -4972,18 +5000,18 @@
;; Predicated floating-point binary operations that take an integer
;; as their second operand.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point binary operations with merging, taking an
@@ -5006,68 +5034,69 @@
;; Predicated floating-point binary operations that take an integer as their
;; second operand, with inactive lanes coming from the first operand.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
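
The _relaxed forms leave the predicate inside the FP unspec as a bare (match_operand 4); the rewrite clause then runs whenever it is not already identical to the governing predicate and simply copies operand 1 in. Schematically:

  ;; matched:  (unspec [(match_operand 4) (const_int SVE_RELAXED_GP) ...])
  ;; rewrite:  operands[4] = copy_rtx (operands[1]);
  ;; result:   both predicates are the same rtx, so later passes see the
  ;;           relaxed operation governed by the real predicate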
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point binary operations that take an integer as
;; their second operand, with the values of inactive lanes being distinct
;; from the other inputs.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5087,23 +5116,24 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5183,19 +5213,19 @@
;; Predicated floating-point binary operations that have no immediate forms.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY_REG))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point operations with merging.
@@ -5216,155 +5246,156 @@
;; Predicated floating-point operations, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_2_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ }
)
;; Predicated floating-point operations, merging with the second input.
(define_insn_and_rewrite "*cond_<optab><mode>_3_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_3_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated floating-point operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5384,26 +5415,27 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_BINARY)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5417,22 +5449,23 @@
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5452,22 +5485,23 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #"
+ {@ [ cons: =0 , 1 , 2 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5489,22 +5523,23 @@
;; Predicated floating-point addition.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, i, Z, Ui1, i, i, Ui1")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w, w, vsA, vsN, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand")]
SVE_COND_FP_ADD))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsA , i ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN , i ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsA , i ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , vsN , i ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -5512,79 +5547,79 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (plus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
)
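
Note for readers following the conversion: in the compact syntax each bracketed row pairs one alternative's constraints with its output template, and the optional trailing "attrs" column replaces the per-alternative set_attr list. A minimal sketch of the correspondence, using placeholder constraints and mnemonics rather than any real pattern:

  ;; Old style: constraints, templates and attributes in parallel lists.
  (define_insn "*example"
    [(set (match_operand:SI 0 "register_operand" "=r,w")
          (match_operand:SI 1 "register_operand" "r,w"))]
    ""
    "@
     insn_a\t%0, %1
     insn_b\t%0, %1"
    [(set_attr "movprfx" "*,yes")]
  )

  ;; New style: constraints move out of the operands into one row per
  ;; alternative; "*" keeps the attribute's default value.
  (define_insn "*example"
    [(set (match_operand:SI 0 "register_operand")
          (match_operand:SI 1 "register_operand"))]
    ""
    {@ [ cons: =0 , 1 ; attrs: movprfx ]
       [ r       , r ; *              ] insn_a\t%0, %1
       [ w       , w ; yes            ] insn_b\t%0, %1
    }
  )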
;; Predicated floating-point addition of a constant, merging with the
;; first input.
(define_insn_and_rewrite "*cond_add<mode>_2_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , vsA ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , vsN ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,*,yes,yes")]
)
(define_insn "*cond_add<mode>_2_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , vsA ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , vsN ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA ; yes ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?w , Upl , w , vsN ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ }
)
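
A quick key to the constraint letters used in these rows, from the generic GCC constraint syntax plus the aarch64 definitions (the vs* descriptions are paraphrased from memory and worth checking against aarch64's constraints.md):

  =    operand is written
  &    earlyclobber: written before all input operands are consumed
  ?    slightly disparage this alternative when allocating registers
  %    this operand and the next one are commutative
  0    must be allocated to the same register as operand 0
  w    SVE/Advanced SIMD vector register
  Upl  one of the lower predicate registers p0-p7
  Dz   the constant zero vector
  i    any integer immediate
  vsA  FP immediate usable by fadd; vsN is its negated twin, selecting
       fsub with the #%N3 (negated immediate) output modifier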
;; Predicated floating-point addition of a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_add<mode>_any_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ w , Upl , w , vsA , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , vsA , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA , w ] #
+ [ ?w , Upl , w , vsN , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5604,25 +5639,26 @@
)
(define_insn_and_rewrite "*cond_add<mode>_any_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
UNSPEC_COND_FADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
- #
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ w , Upl , w , vsA , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ w , Upl , w , vsA , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , vsN , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+ [ ?w , Upl , w , vsA , w ] #
+ [ ?w , Upl , w , vsN , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5645,18 +5681,18 @@
;; Predicated FCADD.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
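
fcadd performs a complex addition in which the second source is first rotated in the complex plane; the literal printed by #<rot> is supplied by the rotation iterator behind SVE_COND_FCADD (one unspec per supported angle, as far as this conversion is concerned), so each instantiation emits a fixed rotation:

  fcadd z0.s, p0/m, z0.s, z1.s, #90    ; add z1 rotated by +90 degrees
  fcadd z0.s, p0/m, z0.s, z1.s, #270   ; add z1 rotated by +270 degrees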
;; Predicated FCADD with merging.
@@ -5691,66 +5727,67 @@
;; Predicated FCADD, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; Predicated FCADD, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5770,23 +5807,24 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FCADD)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5808,21 +5846,22 @@
;; Predicated floating-point subtraction.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, Ui1, i, Ui1")
- (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand" "vsA, w, 0, w, vsA, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w, w, 0, w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_SUB))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #
- fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , vsA , 0 , i ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ w , Upl , w , 0 , Ui1 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , vsA , w , i ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -5830,72 +5869,72 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,*,yes,yes")]
)
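
The bare "#" template in the row constrained by Z emits no assembly on purpose: Z accepts only integer zero, which matches the SVE_RELAXED_GP value tested in the split condition, and the define_insn_and_split's split clause rewrites such insns after reload into the unpredicated RTL shown above,

  [(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]

so the plain fsub pattern picks it up without keeping a needless PTRUE live. The fadd and fmul conversions in this series follow the same scheme.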
;; Predicated floating-point subtraction from a constant, merging with the
;; second input.
(define_insn_and_rewrite "*cond_sub<mode>_3_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
+ {@ [ cons: =0 , 1 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_sub<mode>_3_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w ; yes ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ }
)
;; Predicated floating-point subtraction from a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_sub<mode>_const_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -5915,22 +5954,23 @@
)
(define_insn_and_rewrite "*cond_sub<mode>_const_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
(match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
- #"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ w , Upl , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ w , Upl , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+ [ ?w , Upl , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -5968,45 +6008,45 @@
;; Predicated floating-point absolute difference.
(define_insn_and_rewrite "*aarch64_pred_abd<mode>_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_pred_abd<mode>_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
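
Most patterns in this file come in _relaxed/_strict pairs. The relaxed form leaves the inner governing predicate as a free (match_operand 4) or (match_operand 5) because SVE_RELAXED_GP means, roughly, that inactive lanes may take any value, so any predicate proves the same result; the strict form pins it with (match_dup 1). The relaxed variants therefore carry a rewrite that re-canonicalizes the free operand back to the real predicate once the two diverge, as seen just above:

  "&& !rtx_equal_p (operands[1], operands[5])"
  {
    operands[5] = copy_rtx (operands[1]);
  }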
(define_expand "@aarch64_cond_abd<mode>"
@@ -6034,138 +6074,139 @@
;; Predicated floating-point absolute difference, merging with the first
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
"&& (!rtx_equal_p (operands[1], operands[4])
|| !rtx_equal_p (operands[1], operands[5]))"
{
operands[4] = copy_rtx (operands[1]);
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_cond_abd<mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point absolute difference, merging with the second
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_3_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
"&& (!rtx_equal_p (operands[1], operands[4])
|| !rtx_equal_p (operands[1], operands[5]))"
{
operands[4] = copy_rtx (operands[1]);
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*aarch64_cond_abd<mode>_3_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ }
)
;; Predicated floating-point absolute difference, merging with an
;; independent value.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -6189,30 +6230,31 @@
)
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
(unspec:SVE_FULL_F
[(match_dup 1)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ]
+ [ &w , Upl , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+ [ &w , Upl , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[4], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[4])"
@@ -6233,20 +6275,21 @@
;; Predicated floating-point multiplication.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, i, Ui1")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, w, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand" "vsM, w, w, vsM, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SI 4 "aarch64_sve_gp_strictness")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand")]
SVE_COND_FP_MUL))]
"TARGET_SVE"
- "@
- fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- #
- fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsM , i ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , w , w , Z ; * ] #
+ [ w , Upl , 0 , w , Ui1 ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsM , i ; yes ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w , Ui1 ; yes ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
; Split the unpredicated form after reload, so that we don't have
; the unnecessary PTRUE.
"&& reload_completed
@@ -6254,7 +6297,6 @@
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
[(set (match_dup 0) (mult:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
- [(set_attr "movprfx" "*,*,*,yes,yes")]
)
;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6441,20 +6483,20 @@
;; Predicated floating-point maximum/minimum.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand" "vsB, w, vsB, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand")]
SVE_COND_FP_MAXMIN))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
- movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , %0 , vsB ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , vsB ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Merging forms are handled through SVE_COND_FP_BINARY and
@@ -6708,21 +6750,21 @@
;; Predicated integer addition of product.
(define_insn "@aarch64_pred_fma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(plus:SVE_I
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")))]
+ (match_operand:SVE_I 4 "register_operand")))]
"TARGET_SVE"
- "@
- mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ w , Upl , w , w , 0 ; * ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
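
The two tied alternatives select between opcodes by which input the destination overwrites: mad needs the tied register to be a multiplicand (the %0 row, where % lets the allocator swap operands 2 and 3), mla needs it to be the addend (the 0 in column 4), and the movprfx row covers the case where neither tie is available. Semantics for reference:

  mad z0.s, p0/m, z1.s, z2.s   ; z0 = z2 + z0 * z1  (destination is a multiplicand)
  mla z0.s, p0/m, z1.s, z2.s   ; z0 = z0 + z1 * z2  (destination is the addend)

The msb/mls and fmad/fmla conversions below follow the same pattern, for subtraction and floating point respectively.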
;; Predicated integer addition of product with merging.
@@ -6750,65 +6792,66 @@
;; Predicated integer addition of product, merging with the first input.
(define_insn "*cond_fma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer addition of product, merging with the third input.
(define_insn "*cond_fma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "0, w"))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer addition of product, merging with an independent value.
(define_insn_and_rewrite "*cond_fma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(plus:SVE_I
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w"))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 4 "register_operand"))
+ (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -6849,21 +6892,21 @@
;; Predicated integer subtraction of product.
(define_insn "@aarch64_pred_fnma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")
+ (match_operand:SVE_I 4 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand"))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
- "@
- msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ w , Upl , w , w , 0 ; * ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer subtraction of product with merging.
@@ -6891,66 +6934,67 @@
;; Predicated integer subtraction of product, merging with the first input.
(define_insn "*cond_fnma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer subtraction of product, merging with the third input.
(define_insn "*cond_fnma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "0, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated integer subtraction of product, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_fnma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_I 0 "register_operand")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w")
+ (match_operand:SVE_I 4 "register_operand")
(mult:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w")))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "register_operand")))
+ (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -6974,70 +7018,70 @@
;; Four-element integer dot-product with accumulation.
(define_insn "<sur>dot_prod<vsi2qi>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")]
DOTPROD)
- (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 3 "register_operand")))]
"TARGET_SVE"
- "@
- <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
- movprfx\t%0, %3\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+ }
)
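
sdot/udot widen by a factor of four: each lane of the accumulator gains the dot product of the four narrow subelements beneath it (bytes for the .s form, halfwords for .d), so per lane the .s form computes, roughly:

  z0.s[i] += z1.b[4*i+0]*z2.b[4*i+0] + z1.b[4*i+1]*z2.b[4*i+1]
           + z1.b[4*i+2]*z2.b[4*i+2] + z1.b[4*i+3]*z2.b[4*i+3]

with the products sign- or zero-extended according to <sur>. As elsewhere, the ?&w/movprfx row exists for when the accumulator cannot be allocated to the destination register.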
;; Four-element integer dot-product by selected lanes with accumulation.
(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+ [(match_operand:<VSI2QI> 1 "register_operand")
(unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:<VSI2QI> 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD)
- (match_operand:SVE_FULL_SDI 4 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 4 "register_operand")))]
"TARGET_SVE"
- "@
- <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
- movprfx\t%0, %4\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_con> , 0 ; * ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+ [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+ }
)
(define_insn "@<sur>dot_prod<vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
- (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 1 "register_operand")
+ (match_operand:<VSI2QI> 2 "register_operand")]
DOTPROD_US_ONLY)
- (match_operand:VNx4SI_ONLY 3 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 3 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>dot\\t%0.s, %1.b, %2.b
- movprfx\t%0, %3\;<sur>dot\\t%0.s, %1.b, %2.b"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sur>dot\t%0.s, %1.b, %2.b
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sur>dot\t%0.s, %1.b, %2.b
+ }
)
(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+ [(match_operand:<VSI2QI> 1 "register_operand")
(unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand" "y, y")
+ [(match_operand:<VSI2QI> 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD_I8MM)
- (match_operand:VNx4SI_ONLY 4 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 4 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>dot\\t%0.s, %1.b, %2.b[%3]
- movprfx\t%0, %4\;<sur>dot\\t%0.s, %1.b, %2.b[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , y , 0 ; * ] <sur>dot\t%0.s, %1.b, %2.b[%3]
+ [ ?&w , w , y , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.s, %1.b, %2.b[%3]
+ }
)
;; -------------------------------------------------------------------------
@@ -7080,18 +7124,18 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_add_<optab><vsi2qi>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 2 "register_operand" "w, w")
- (match_operand:<VSI2QI> 3 "register_operand" "w, w")]
+ [(match_operand:<VSI2QI> 2 "register_operand")
+ (match_operand:<VSI2QI> 3 "register_operand")]
MATMUL)
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 1 "register_operand")))]
"TARGET_SVE_I8MM"
- "@
- <sur>mmla\\t%0.s, %2.b, %3.b
- movprfx\t%0, %1\;<sur>mmla\\t%0.s, %2.b, %3.b"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sur>mmla\t%0.s, %2.b, %3.b
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sur>mmla\t%0.s, %2.b, %3.b
+ }
)
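
If memory serves, the I8MM matrix multiply-accumulate instructions operate per 128-bit segment: smmla/ummla/usmmla view each segment of the two .b sources as a 2x8 and an 8x2 matrix of (signed, unsigned, or mixed-signedness) bytes and accumulate their 2x2 int32 product into the matching segment of the .s accumulator:

  ; per 128-bit segment, with A taken from %2.b and B from %3.b:
  ; %0.s (viewed as a 2x2 int32 matrix) += A (2x8) * B (8x2)

Only the accumulator is tied, hence the single tied/movprfx pair of alternatives.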
;; -------------------------------------------------------------------------
@@ -7126,20 +7170,20 @@
;; Predicated floating-point ternary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "%w, 0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , %w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
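One subtlety in the ternary pattern above: operand 2's removed constraint string began with the commutativity marker, "%w, 0, w". The marker describes the operand as a whole rather than a single alternative, and in the table it survives attached only to the first row's entry for that column. If I read the new syntax correctly, writing it once there is equivalent to the old prefix form:

    ;; The % in the first row marks operands 2 and 3 as commutative for the
    ;; whole pattern, mirroring the old "%w, 0, w" constraint string.
    {@ [ cons: =0 , 1   , 2  , 3 , 4 ; attrs: movprfx ]
       [ w        , Upl , %w , w , 0 ; *              ] <sve_fmla_op>\t...
       [ w        , Upl , 0  , w , w ; *              ] <sve_fmad_op>\t...
    }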
;; Predicated floating-point ternary operations with merging.
@@ -7167,121 +7211,122 @@
;; Predicated floating-point ternary operations, merging with the
;; first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
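The pattern above also shows how define_insn_and_rewrite fares under the conversion: only the output template moves into the {@ ... } block; the rewrite condition and the preparation statements keep their usual place after it, and the final [(set_attr "movprfx" ...)] vector is dropped because the "attrs: movprfx" column now carries those values. Schematically (UNSPEC_EXAMPLE, the mnemonic and the condition are placeholders, not code from this patch):

    (define_insn_and_rewrite "*example"
      [(set (match_operand 0)
            (unspec [(match_operand 1)] UNSPEC_EXAMPLE))]
      "TARGET_SVE"
      {@ [ cons: =0 , 1 ; attrs: movprfx ]
         [ w        , 0 ; *              ] example\t%0, %0
         [ ?&w      , w ; yes            ] movprfx\t%0, %1\;example\t%0, %0
      }
      ;; Rewrite condition and preparation block: untouched by the conversion.
      "&& example_rewrite_condition (operands)"
      {
        /* preparation statements */
      }
    )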
(define_insn "*cond_<optab><mode>_2_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , 0 , w , w ; * ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated floating-point ternary operations, merging with the
;; third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_4_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ }
)
;; Predicated floating-point ternary operations, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -7301,29 +7346,30 @@
)
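Two features of the six-alternative table above are worth spelling out. The &w destination constraints are earlyclobbers: those alternatives write the output (via movprfx) before all inputs have been read, so the destination must not share a register with an untied input. And the last row's template is just #, the conventional marker for an alternative that no single instruction implements; insns matched that way are expected to be split or rewritten later, which is what the "&& 1" rewrite body following the table arranges once reload has committed to registers. The row in question:

    ;; No single-instruction form; matched now, rewritten after reload.
    [ ?&w , Upl , w , w , w , w ] #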
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
&& !rtx_equal_p (operands[3], operands[5])
&& !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ &w , Upl , 0 , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , 0 , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -7338,20 +7384,20 @@
;; Unpredicated FMLA and FMLS by selected lanes. It doesn't seem worth using
;; (fma ...) since target-independent code won't understand the indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 1 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_F 1 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_FP_TERNARY_LANE))]
"TARGET_SVE"
- "@
- <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
- movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_con> , 0 ; * ] <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+ [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+ }
)
;; -------------------------------------------------------------------------
@@ -7363,19 +7409,19 @@
;; Predicated FCMLA.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; unpredicated optab pattern for auto-vectorizer
@@ -7453,69 +7499,70 @@
;; Predicated FCMLA, merging with the third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 5)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
"&& !rtx_equal_p (operands[1], operands[5])"
{
operands[5] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes")]
)
(define_insn "*cond_<optab><mode>_4_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
+ [ w , Upl , w , w , 0 ; * ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ }
)
;; Predicated FCMLA, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 6)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& 1"
{
if (reload_completed
@@ -7535,24 +7582,25 @@
)
(define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
- (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
- (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
- "@
- movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
- #"
+ {@ [ cons: =0 , 1 , 2 , 3 , 4 , 5 ]
+ [ &w , Upl , w , w , w , Dz ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , 0 , Dz ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ &w , Upl , w , w , w , 0 ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+ [ ?&w , Upl , w , w , w , w ] #
+ }
"&& reload_completed
&& register_operand (operands[5], <MODE>mode)
&& !rtx_equal_p (operands[0], operands[5])"
@@ -7566,20 +7614,20 @@
;; Unpredicated FCMLA with indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:SVE_FULL_HSF 1 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_HSF 1 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:SVE_FULL_HSF 2 "register_operand" "<sve_lane_pair_con>, <sve_lane_pair_con>")
+ [(match_operand:SVE_FULL_HSF 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_FULL_HSF 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSF 4 "register_operand")]
FCMLA))]
"TARGET_SVE"
- "@
- fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
- movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <sve_lane_pair_con> , 0 ; * ] fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+ [ ?&w , w , <sve_lane_pair_con> , w ; yes ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+ }
)
;; -------------------------------------------------------------------------
@@ -7590,17 +7638,17 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_tmad<mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:SVE_FULL_F 1 "register_operand" "0, w")
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ [(match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")
(match_operand:DI 3 "const_int_operand")]
UNSPEC_FTMAD))]
"TARGET_SVE"
- "@
- ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
- movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ [ ?&w , w , w ; yes ] movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+ }
)
;; -------------------------------------------------------------------------
@@ -7614,33 +7662,33 @@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_<sve_fp_op>vnx4sf"
- [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF 0 "register_operand")
(unspec:VNx4SF
- [(match_operand:VNx4SF 1 "register_operand" "0, w")
- (match_operand:VNx8BF 2 "register_operand" "w, w")
- (match_operand:VNx8BF 3 "register_operand" "w, w")]
+ [(match_operand:VNx4SF 1 "register_operand")
+ (match_operand:VNx8BF 2 "register_operand")
+ (match_operand:VNx8BF 3 "register_operand")]
SVE_BFLOAT_TERNARY_LONG))]
"TARGET_SVE_BF16"
- "@
- <sve_fp_op>\t%0.s, %2.h, %3.h
- movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_fp_op>\t%0.s, %2.h, %3.h
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h
+ }
)
;; The immediate range is enforced before generating the instruction.
(define_insn "@aarch64_sve_<sve_fp_op>_lanevnx4sf"
- [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SF 0 "register_operand")
(unspec:VNx4SF
- [(match_operand:VNx4SF 1 "register_operand" "0, w")
- (match_operand:VNx8BF 2 "register_operand" "w, w")
- (match_operand:VNx8BF 3 "register_operand" "y, y")
+ [(match_operand:VNx4SF 1 "register_operand")
+ (match_operand:VNx8BF 2 "register_operand")
+ (match_operand:VNx8BF 3 "register_operand")
(match_operand:SI 4 "const_int_operand")]
SVE_BFLOAT_TERNARY_LONG_LANE))]
"TARGET_SVE_BF16"
- "@
- <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
- movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , y ; * ] <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+ [ ?&w , w , w , y ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+ }
)
;; -------------------------------------------------------------------------
@@ -7652,17 +7700,17 @@
;; The mode iterator enforces the target requirements.
(define_insn "@aarch64_sve_<sve_fp_op><mode>"
- [(set (match_operand:SVE_MATMULF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_MATMULF 0 "register_operand")
(unspec:SVE_MATMULF
- [(match_operand:SVE_MATMULF 2 "register_operand" "w, w")
- (match_operand:SVE_MATMULF 3 "register_operand" "w, w")
- (match_operand:SVE_MATMULF 1 "register_operand" "0, w")]
+ [(match_operand:SVE_MATMULF 2 "register_operand")
+ (match_operand:SVE_MATMULF 3 "register_operand")
+ (match_operand:SVE_MATMULF 1 "register_operand")]
FMMLA))]
"TARGET_SVE"
- "@
- <sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;<sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , 0 , w , w ; * ] <sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
)
;; =========================================================================
@@ -7713,24 +7761,24 @@
;; For the other instructions, using the element size is more natural,
;; so we do that for SEL as well.
(define_insn "*vcond_mask_<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upa, Upa")
- (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
+ [(match_operand:<VPRED> 3 "register_operand")
+ (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm")
+ (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE
&& (!register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
- "@
- sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
- mov\t%0.<Vetype>, %3/m, #%I1
- mov\t%0.<Vetype>, %3/z, #%I1
- fmov\t%0.<Vetype>, %3/m, #%1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
- movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1"
- [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , Upa ; * ] sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
+ [ w , vss , 0 , Upa ; * ] mov\t%0.<Vetype>, %3/m, #%I1
+ [ w , vss , Dz , Upa ; * ] mov\t%0.<Vetype>, %3/z, #%I1
+ [ w , Ufc , 0 , Upa ; * ] fmov\t%0.<Vetype>, %3/m, #%1
+ [ ?w , Ufc , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
+ [ ?&w , vss , w , Upa ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
+ [ ?&w , Ufc , w , Upa ; yes ] movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1
+ }
)
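The SEL table above uses most of the constraint letters that recur in this file, so an informal glossary may help. This is my reading of gcc/config/aarch64/constraints.md and the GCC internals manual, not text from the patch, and the vss entry in particular should be double-checked against the sources:

    ;;  w    FP/SIMD/SVE vector registers (z0-z31)
    ;;  Upa  any SVE predicate register (p0-p15)
    ;;  Upl  the lower SVE predicate registers (p0-p7)
    ;;  Dz   a constant vector of zeros
    ;;  Ufc  a floating-point immediate that FMOV/FCPY can encode
    ;;  vss  a signed immediate usable by SVE integer MOV/CPY (less certain)
    ;;  0    tied: must be allocated the same register as operand 0
    ;;  =    output; & earlyclobber; each ? slightly disparages the alternative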
;; Optimize selects between a duplicated scalar variable and another vector,
@@ -7738,22 +7786,22 @@
;; of GPRs as being more expensive than duplicates of FPRs, since they
;; involve a cross-file move.
(define_insn "@aarch64_sel_dup<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??w, ?&w, ??&w, ?&w")
+ [(set (match_operand:SVE_ALL 0 "register_operand")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 3 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 3 "register_operand")
(vec_duplicate:SVE_ALL
- (match_operand:<VEL> 1 "register_operand" "r, w, r, w, r, w"))
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "0, 0, Dz, Dz, w, w")]
+ (match_operand:<VEL> 1 "register_operand"))
+ (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- mov\t%0.<Vetype>, %3/m, %<vwcore>1
- mov\t%0.<Vetype>, %3/m, %<Vetype>1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
- movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
- movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1"
- [(set_attr "movprfx" "*,*,yes,yes,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ ?w , r , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ w , w , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ [ ??w , r , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ ?&w , w , Dz , Upl ; yes ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ [ ??&w , r , w , Upl ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+ [ ?&w , w , w , Upl ; yes ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+ }
)
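The ? and ?? prefixes in the table above are how the cost comment before this pattern is enforced: each ? makes an alternative look slightly more expensive when the register allocator weighs its options, so the duplicated-GPR rows (the r columns, which imply a cross-file move) only win when the FPR rows cannot be used. Compare the first two alternatives:

    [ ?w , r , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<vwcore>1   ;; from a GPR
    [ w  , w , 0 , Upl ; * ] mov\t%0.<Vetype>, %3/m, %<Vetype>1   ;; from an FPR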
;; -------------------------------------------------------------------------
@@ -7891,19 +7939,20 @@
;; - the predicate result bit is in the undefined part of a VNx2BI,
;; so its value doesn't matter anyway.
(define_insn "@aarch64_pred_cmp<cmp_op><mode>"
- [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 3 "register_operand" "w, w")
- (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 3 "register_operand")
+ (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z))
(clobber (reg:CC_NZC CC_REGNUM))]
"TARGET_SVE"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ Upa , Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
+ [ Upa , Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Predicated integer comparisons in which both the flag and predicate
@@ -7911,18 +7960,18 @@
(define_insn_and_rewrite "*cmp<cmp_op><mode>_cc"
[(set (reg:CC_NZC CC_REGNUM)
(unspec:CC_NZC
- [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx16BI 1 "register_operand")
(match_operand 4)
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
- (set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ (set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
[(match_dup 6)
(match_dup 7)
@@ -7932,9 +7981,10 @@
UNSPEC_PRED_Z))]
"TARGET_SVE
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ Upa , Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+ [ Upa , Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[4], operands[6])"
{
operands[6] = copy_rtx (operands[4]);
@@ -7947,23 +7997,24 @@
(define_insn_and_rewrite "*cmp<cmp_op><mode>_ptest"
[(set (reg:CC_NZC CC_REGNUM)
(unspec:CC_NZC
- [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx16BI 1 "register_operand")
(match_operand 4)
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
(clobber (match_scratch:<VPRED> 0 "=Upa, Upa"))]
"TARGET_SVE
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
- "@
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
- cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+ {@ [ cons: 1 , 2 , 3 ]
+ [ Upl , w , <sve_imm_con> ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+ [ Upl , w , w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+ }
"&& !rtx_equal_p (operands[4], operands[6])"
{
operands[6] = copy_rtx (operands[4]);
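Worth noticing in the _ptest pattern just above: the (clobber (match_scratch ...)) keeps its old inline constraint string, and the table header consequently starts at operand 1. The conversion only lifts match_operand constraints into the table; scratches stay written the old way, which is why this header has no =0 column:

    (clobber (match_scratch:<VPRED> 0 "=Upa, Upa"))  ;; constraints left inline
    ...
    {@ [ cons: 1 , 2 , 3 ]                           ;; no column for operand 0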
@@ -8184,17 +8235,18 @@
;; Predicated floating-point comparisons.
(define_insn "@aarch64_pred_fcm<cmp_op><mode>"
- [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:<VPRED> 0 "register_operand")
(unspec:<VPRED>
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
- (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
- (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
SVE_COND_FP_CMP_I0))]
"TARGET_SVE"
- "@
- fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
- fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+ {@ [ cons: =0 , 1 , 3 , 4 ]
+ [ Upa , Upl , w , Dz ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
+ [ Upa , Upl , w , w ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+ }
)
;; Same for unordered comparisons.
@@ -8576,29 +8628,31 @@
;; Set operand 0 to the last active element in operand 3, or to tied
;; operand 1 if no elements are active.
(define_insn "@fold_extract_<last_op>_<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+ [(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL>
- [(match_operand:<VEL> 1 "register_operand" "0, 0")
- (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:<VEL> 1 "register_operand")
+ (match_operand:<VPRED> 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
CLAST))]
"TARGET_SVE"
- "@
- clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
- clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ ?r , 0 , Upl , w ] clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
+ [ w , 0 , Upl , w ] clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>
+ }
)
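In the CLAST pattern above, the "1" column is 0 in both rows, i.e. operand 1 is tied to the output; that implements the fallback described in the comment, where an all-false predicate leaves operand 1's value in place. The two alternatives differ only in where the scalar lands: a general register, printed with %<vwcore>0 (w or x depending on element size, as I understand the iterator), or a vector element printed with %<Vetype>0, with ?r disparaging the GPR form:

    ;; Scalar result in a GPR: disparaged, since it needs a cross-file move.
    [ ?r , 0 , Upl , w ] clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
    ;; Scalar result kept in the FP/vector file.
    [ w  , 0 , Upl , w ] clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>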
(define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:SVE_FULL 1 "register_operand" "0, w")
- (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:SVE_FULL 1 "register_operand")
+ (match_operand:<VPRED> 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
CLAST))]
"TARGET_SVE"
- "@
- clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ w , 0 , Upl , w ] clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , w , Upl , w ] movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -8865,17 +8919,17 @@
;; Like EXT, but start at the first active element.
(define_insn "@aarch64_sve_splice<mode>"
- [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL 0 "register_operand")
(unspec:SVE_FULL
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_FULL 2 "register_operand" "0, w")
- (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:SVE_FULL 2 "register_operand")
+ (match_operand:SVE_FULL 3 "register_operand")]
UNSPEC_SVE_SPLICE))]
"TARGET_SVE"
- "@
- splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
- movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*, yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , 0 , w ; * ] splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+ }
)
;; Permutes that take half the elements from one vector and half the
@@ -9057,32 +9111,32 @@
;; Predicated float-to-integer conversion, either to the same width or wider.
(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
)
;; Predicated narrowing float-to-integer conversion.
(define_insn "@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx2DF_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx2DF_ONLY 2 "register_operand")]
SVE_COND_FCVTI))]
"TARGET_SVE"
- "@
- fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ }
)
;; Predicated float-to-integer conversion with merging, either to the same
@@ -9107,45 +9161,45 @@
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict"
- [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
(unspec:SVE_FULL_HSDI
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSDI
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ }
)
;; Predicated narrowing float-to-integer conversion with merging.
@@ -9164,22 +9218,22 @@
)
(define_insn "*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
- [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(unspec:VNx4SI_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(unspec:VNx4SI_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx2DF_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx2DF_ONLY 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+ }
)
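This three-row shape repeats through the rest of the conversion section. Because the source and destination element widths differ, a half-written destination could overwrite live input lanes, so, per the earlyclobber comment above the _relaxed pattern, even the tied and zero-merging alternatives are marked &w for consistency. The scheme, elided (...) to its skeleton:

    [ &w  , Upl , w , 0  ; *   ] fcvtz<su>\t...           ;; merge into tied output
    [ &w  , Upl , w , Dz ; yes ] movprfx\t%0..., %1/z, ...\;fcvtz<su>\t...   ;; merge with zero
    [ ?&w , Upl , w , w  ; yes ] movprfx\t%0, %3\;fcvtz<su>\t...             ;; independent value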
;; -------------------------------------------------------------------------
@@ -9244,32 +9298,32 @@
;; Predicated integer-to-float conversion, either to the same width or
;; narrower.
(define_insn "@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated widening integer-to-float conversion.
(define_insn "@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
- [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
(unspec:VNx2DF_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SI_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE_COND_ICVTF))]
"TARGET_SVE"
- "@
- <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ }
)
;; Predicated integer-to-float conversion with merging, either to the same
@@ -9294,45 +9348,45 @@
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_operand 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
}
- [(set_attr "movprfx" "*,yes,yes")]
)
(define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict"
- [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
(unspec:SVE_FULL_F
- [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
(unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
- "@
- <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ &w , Upl , w , 0 ; * ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ &w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ }
)
;; Predicated widening integer-to-float conversion with merging.
@@ -9351,22 +9405,22 @@
)
(define_insn "*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
- [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
(unspec:VNx2DF_ONLY
- [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx2BI 1 "register_operand")
(unspec:VNx2DF_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx4SI_ONLY 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
- "@
- <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9442,17 +9496,17 @@
;; Predicated float-to-float truncation.
(define_insn "@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_SDF 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ }
)
;; Predicated float-to-float truncation with merging.
@@ -9471,22 +9525,22 @@
)
(define_insn "*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
- [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
(unspec:SVE_FULL_HSF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(unspec:SVE_FULL_HSF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_SDF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ }
)
;; -------------------------------------------------------------------------
@@ -9499,17 +9553,17 @@
;; Predicated BFCVT.
(define_insn "@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
- [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
(unspec:VNx8BF_ONLY
- [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl")
+ [(match_operand:VNx4BI 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SF_ONLY 2 "register_operand" "0, w")]
+ (match_operand:VNx4SF_ONLY 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE_BF16"
- "@
- bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s
+ }
)
;; Predicated BFCVT with merging.
@@ -9528,22 +9582,22 @@
)
(define_insn "*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
- [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
(unspec:VNx8BF_ONLY
- [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:VNx4BI 1 "register_operand")
(unspec:VNx8BF_ONLY
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:VNx4SF_ONLY 2 "register_operand" "w, w, w")]
+ (match_operand:VNx4SF_ONLY 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE_BF16"
- "@
- bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
- movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s
+ }
)
;; Predicated BFCVTNT. This doesn't give a natural aarch64_pred_*/cond_*
@@ -9599,17 +9653,17 @@
;; Predicated float-to-float extension.
(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
- [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
(unspec:SVE_FULL_SDF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSF 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand")]
SVE_COND_FCVT))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
- [(set_attr "movprfx" "*,yes")]
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ }
)
;; Predicated float-to-float extension with merging.
@@ -9628,22 +9682,22 @@
)
(define_insn "*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
- [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w, ?&w")
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
(unspec:SVE_FULL_SDF
- [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
(unspec:SVE_FULL_SDF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_FULL_HSF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
- "@
- fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
- [(set_attr "movprfx" "*,yes,yes")]
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w , Dz ; yes ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ }
)
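;; In the merging forms just above, operand 3 (the "else" value of the
;; UNSPEC_SEL) is what distinguishes the three alternatives: constraint
;; "0" ties it to the destination so the conversion can merge in place
;; with no prefix instruction, "Dz" is the zero vector and lets
;; movprfx\t%0.<T>, %1/z zero the inactive lanes first, and "w" is a
;; general vector that movprfx\t%0, %3 copies into the destination
;; before the predicated conversion overwrites the active lanes.  The
;; compact rows make this pairing of constraint and template visible at
;; a glance, where the old syntax kept them in separate lists.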
;; -------------------------------------------------------------------------
@@ -9716,16 +9770,17 @@
;; zeroing forms, these instructions don't operate elementwise and so
;; don't fit the IFN_COND model.
(define_insn "@aarch64_brk<brk_op>"
- [(set (match_operand:VNx16BI 0 "register_operand" "=Upa, Upa")
+ [(set (match_operand:VNx16BI 0 "register_operand")
(unspec:VNx16BI
- [(match_operand:VNx16BI 1 "register_operand" "Upa, Upa")
- (match_operand:VNx16BI 2 "register_operand" "Upa, Upa")
- (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero" "Dz, 0")]
+ [(match_operand:VNx16BI 1 "register_operand")
+ (match_operand:VNx16BI 2 "register_operand")
+ (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero")]
SVE_BRK_UNARY))]
"TARGET_SVE"
- "@
- brk<brk_op>\t%0.b, %1/z, %2.b
- brk<brk_op>\t%0.b, %1/m, %2.b"
+ {@ [ cons: =0 , 1 , 2 , 3 ]
+ [ Upa , Upa , Upa , Dz ] brk<brk_op>\t%0.b, %1/z, %2.b
+ [ Upa , Upa , Upa , 0 ] brk<brk_op>\t%0.b, %1/m, %2.b
+ }
)
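;; Where a pattern has no per-alternative attributes, as in
;; @aarch64_brk<brk_op> above (which had no set_attr list in the old
;; syntax either), the "attrs:" part of the header and the ";"
;; separator in each row are simply omitted: every row is just the
;; constraints followed by the template.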
;; Same, but also producing a flags result.
@@ -10446,25 +10501,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DI 0 "register_operand")
(ANY_PLUS:VNx2DI
(vec_duplicate:VNx2DI
(zero_extend:DI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)))
- (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx2DI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.d, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.d, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
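;; For define_insn_and_rewrite patterns such as the one above, the
;; compact block replaces both the "@" template and the trailing
;; set_attr list, while the rewrite condition and rewrite body that
;; follow the template are left untouched.  Schematically (a
;; hypothetical skeleton, with "..." eliding the unchanged RTL), and
;; likewise for the remaining _cntp increment/decrement patterns below:
;;
;;   (define_insn_and_rewrite "*sketch_cntp"
;;     [... (match_operand 3) ...]
;;     "TARGET_SVE"
;;     {@ [ cons: =0 , 1 , 2	; attrs: movprfx ]
;;	  [ w	     , 0 , Upa	; *   ] ...
;;	  [ ?&w	     , w , Upa	; yes ] ...
;;     }
;;     "&& !CONSTANT_P (operands[3])"
;;     {
;;	 operands[3] = CONSTM1_RTX (<VPRED>mode);
;;     }
;;   )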
;; Increment a vector of SIs by the number of set bits in a predicate.
@@ -10486,24 +10541,24 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI 0 "register_operand")
(ANY_PLUS:VNx4SI
(vec_duplicate:VNx4SI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP))
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx4SI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.s, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.s, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Increment a vector of HIs by the number of set bits in a predicate.
@@ -10526,25 +10581,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8HI 0 "register_operand")
(ANY_PLUS:VNx8HI
(vec_duplicate:VNx8HI
(match_operator:HI 3 "subreg_lowpart_operator"
[(unspec:SI
[(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)]))
- (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")))]
+ (match_operand:VNx8HI_ONLY 1 "register_operand")))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.h, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.h, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; -------------------------------------------------------------------------
@@ -10679,25 +10734,25 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx2DI 0 "register_operand")
(ANY_MINUS:VNx2DI
- (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx2DI_ONLY 1 "register_operand")
(vec_duplicate:VNx2DI
(zero_extend:DI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.d, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.d, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Decrement a vector of SIs by the number of set bits in a predicate.
@@ -10719,24 +10774,24 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx4SI 0 "register_operand")
(ANY_MINUS:VNx4SI
- (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx4SI_ONLY 1 "register_operand")
(vec_duplicate:VNx4SI
(unspec:SI
[(match_operand 3)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.s, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.s, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+ }
"&& !CONSTANT_P (operands[3])"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)
;; Decrement a vector of HIs by the number of set bits in a predicate.
@@ -10759,23 +10814,23 @@
)
(define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
- [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+ [(set (match_operand:VNx8HI 0 "register_operand")
(ANY_MINUS:VNx8HI
- (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")
+ (match_operand:VNx8HI_ONLY 1 "register_operand")
(vec_duplicate:VNx8HI
(match_operator:HI 3 "subreg_lowpart_operator"
[(unspec:SI
[(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+ (match_operand:<VPRED> 2 "register_operand")]
UNSPEC_CNTP)]))))]
"TARGET_SVE"
- "@
- <inc_dec>p\t%0.h, %2
- movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , 0 , Upa ; * ] <inc_dec>p\t%0.h, %2
+ [ ?&w , w , Upa ; yes ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
- [(set_attr "movprfx" "*,yes")]
)