author     Kyrylo Tkachov <kyrylo.tkachov@arm.com>   2023-06-21 13:40:15 +0100
committer  Kyrylo Tkachov <kyrylo.tkachov@arm.com>   2023-06-21 13:40:15 +0100
commit     4d9d207c668e757adefcfe5368d86bcc008fa4db (patch)
tree       5fa44a063a4dc5f8005e28224c4964d0b2307930 /gcc
parent     31cd5f9ae4d34765e593acd83c447006d4e8791c (diff)
aarch64: Convert SVE gather patterns to compact syntax
This patch converts the SVE gather load patterns to the new compact syntax that Tamar introduced. This allows a future patch I want to contribute to add more alternatives that are better viewed in the compact form. The lines in some patterns are now longer than 80 characters, but I think that's unavoidable: those patterns already had overly long constraint strings.

No functional change intended.

Bootstrapped and tested on aarch64-none-linux-gnu.

gcc/ChangeLog:

	* config/aarch64/aarch64-sve.md (mask_gather_load<mode><v_int_container>):
	Convert to compact alternatives syntax.
	(mask_gather_load<mode><v_int_container>): Likewise.
	(*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked): Likewise.
	(*mask_gather_load<mode><v_int_container>_sxtw): Likewise.
	(*mask_gather_load<mode><v_int_container>_uxtw): Likewise.
	(@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>):
	Likewise.
	(@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>):
	Likewise.
	(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
	<SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked): Likewise.
	(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
	<SVE_2BHSI:mode>_sxtw): Likewise.
	(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
	<SVE_2BHSI:mode>_uxtw): Likewise.
	(@aarch64_ldff1_gather<mode>): Likewise.
	(@aarch64_ldff1_gather<mode>): Likewise.
	(*aarch64_ldff1_gather<mode>_sxtw): Likewise.
	(*aarch64_ldff1_gather<mode>_uxtw): Likewise.
	(@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode>
	<VNx4_NARROW:mode>): Likewise.
	(@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
	<VNx2_NARROW:mode>): Likewise.
	(*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
	<VNx2_NARROW:mode>_sxtw): Likewise.
	(*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
	<VNx2_NARROW:mode>_uxtw): Likewise.
	* config/aarch64/aarch64-sve2.md (@aarch64_gather_ldnt<mode>): Likewise.
	(@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode>
	<SVE_PARTIAL_I:mode>): Likewise.
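For readers unfamiliar with the compact syntax, the shape of the change is roughly as follows. This is a minimal, hypothetical two-alternative pattern written only to illustrate the notation; the pattern name, operands and templates are not taken from the tree. The constraints move out of the individual match_operands and into a bracketed [cons: ...] row placed next to the template it selects:

;; Old style: constraints embedded in each match_operand, one template
;; per alternative inside a "@" block.
(define_insn "*example_old_syntax"
  [(set (match_operand:SI 0 "register_operand" "=r, w")
	(match_operand:SI 1 "register_operand" "r, w"))]
  ""
  "@
   mov\t%w0, %w1
   fmov\t%s0, %s1"
)

;; Compact style: bare match_operands; each alternative pairs its
;; constraint row with the template that uses it.
(define_insn "*example_new_syntax"
  [(set (match_operand:SI 0 "register_operand")
	(match_operand:SI 1 "register_operand"))]
  ""
  {@ [cons: =0, 1]
     [r, r] mov\t%w0, %w1
     [w, w] fmov\t%s0, %s1
  }
)

With many alternatives and long constraint strings, as in the gather patterns below, keeping constraint and template on the same row makes it much easier to see which constraints drive which output template, and to add new alternatives later.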
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/config/aarch64/aarch64-sve.md	370
-rw-r--r--	gcc/config/aarch64/aarch64-sve2.md	32
2 files changed, 211 insertions, 191 deletions
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 2de651a..955bbdf 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1418,64 +1418,67 @@
;; Predicated gather loads for 32-bit elements. Operand 3 is true for
;; unsigned extension and false for signed extension.
(define_insn "mask_gather_load<mode><v_int_container>"
- [(set (match_operand:SVE_4 0 "register_operand" "=w, w, w, w, w, w")
+ [(set (match_operand:SVE_4 0 "register_operand")
(unspec:SVE_4
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk")
- (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx4SI 2 "register_operand")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
- "@
- ld1<Vesize>\t%0.s, %5/z, [%2.s]
- ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1]
- ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
- ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
- ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
- ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, Z, w, Ui1, Ui1, Upl ] ld1<Vesize>\t%0.s, %5/z, [%2.s]
+ [w, vgw, w, Ui1, Ui1, Upl ] ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1]
+ [w, rk, w, Z, Ui1, Upl ] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
+ [w, rk, w, Ui1, Ui1, Upl ] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
+ [w, rk, w, Z, i, Upl ] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
+ [w, rk, w, Ui1, i, Upl ] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
+ }
)
;; Predicated gather loads for 64-bit elements. The value of operand 3
;; doesn't matter in this case.
(define_insn "mask_gather_load<mode><v_int_container>"
- [(set (match_operand:SVE_2 0 "register_operand" "=w, w, w, w")
+ [(set (match_operand:SVE_2 0 "register_operand")
(unspec:SVE_2
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk")
- (match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
- "@
- ld1<Vesize>\t%0.d, %5/z, [%2.d]
- ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1]
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d]
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, Z, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%2.d]
+ [w, vgd, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1]
+ [w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d]
+ [w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
+ }
)
;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked"
- [(set (match_operand:SVE_2 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2 0 "register_operand")
(unspec:SVE_2
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(ANY_EXTEND:VNx2DI
- (match_operand:VNx2SI 2 "register_operand" "w, w"))]
+ (match_operand:VNx2SI 2 "register_operand"))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
- "@
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw]
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw]
+ [w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1485,24 +1488,25 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; sign-extended.
(define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_sxtw"
- [(set (match_operand:SVE_2 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2 0 "register_operand")
(unspec:SVE_2
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 2 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 2 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
- "@
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
+ [w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1512,21 +1516,22 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; zero-extended.
(define_insn "*mask_gather_load<mode><v_int_container>_uxtw"
- [(set (match_operand:SVE_2 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2 0 "register_operand")
(unspec:SVE_2
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "register_operand")
(and:VNx2DI
- (match_operand:VNx2DI 2 "register_operand" "w, w")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
- "@
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
- ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
+ [w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
+ }
)
;; -------------------------------------------------------------------------
@@ -1544,27 +1549,28 @@
;; Predicated extending gather loads for 32-bit elements. Operand 3 is
;; true for unsigned extension and false for signed extension.
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>"
- [(set (match_operand:SVE_4HSI 0 "register_operand" "=w, w, w, w, w, w")
+ [(set (match_operand:SVE_4HSI 0 "register_operand")
(unspec:SVE_4HSI
- [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm")
+ [(match_operand:VNx4BI 6 "general_operand")
(ANY_EXTEND:SVE_4HSI
(unspec:SVE_4BHI
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>" "Z, vg<SVE_4BHI:Vesize>, rk, rk, rk, rk")
- (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>")
+ (match_operand:VNx4SI 2 "register_operand")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_4HSI:narrower_mask> & <SVE_4BHI:self_mask>) == 0"
- "@
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s]
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1]
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
- ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5, 6 ]
+ [w, Z, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s]
+ [w, vg<SVE_4BHI:Vesize>, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1]
+ [w, rk, w, Z, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
+ [w, rk, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
+ [w, rk, w, Z, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
+ [w, rk, w, Ui1, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx4BImode);
@@ -1574,25 +1580,26 @@
;; Predicated extending gather loads for 64-bit elements. The value of
;; operand 3 doesn't matter in this case.
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>"
- [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w, w, w")
+ [(set (match_operand:SVE_2HSDI 0 "register_operand")
(unspec:SVE_2HSDI
- [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm")
+ [(match_operand:VNx2BI 6 "general_operand")
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>" "Z, vg<SVE_2BHSI:Vesize>, rk, rk")
- (match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, Ui1, Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
- "@
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5, 6]
+ [w, Z, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d]
+ [w, vg<SVE_2BHSI:Vesize>, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1]
+ [w, rk, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d]
+ [w, rk, w, i, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1601,27 +1608,28 @@
;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked"
- [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2HSDI 0 "register_operand")
(unspec:SVE_2HSDI
[(match_operand 6)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
(unspec:VNx2DI
[(match_operand 7)
(ANY_EXTEND2:VNx2DI
- (match_operand:VNx2SI 2 "register_operand" "w, w"))]
+ (match_operand:VNx2SI 2 "register_operand"))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
- "@
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw]
+ [w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4]
+ }
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1632,28 +1640,29 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; sign-extended.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_sxtw"
- [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2HSDI 0 "register_operand")
(unspec:SVE_2HSDI
[(match_operand 6)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
(unspec:VNx2DI
[(match_operand 7)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 2 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 2 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
- "@
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
+ [w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
+ }
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1664,25 +1673,26 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; zero-extended.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_uxtw"
- [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_2HSDI 0 "register_operand")
(unspec:SVE_2HSDI
[(match_operand 7)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 2 "register_operand" "w, w")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
- "@
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
- ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
+ [w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
+ }
"&& !CONSTANT_P (operands[7])"
{
operands[7] = CONSTM1_RTX (VNx2BImode);
@@ -1700,68 +1710,71 @@
;; Predicated first-faulting gather loads for 32-bit elements. Operand
;; 3 is true for unsigned extension and false for signed extension.
(define_insn "@aarch64_ldff1_gather<mode>"
- [(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w")
+ [(set (match_operand:SVE_FULL_S 0 "register_operand")
(unspec:SVE_FULL_S
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk")
- (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
- (match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, Ui1, i, i")
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_w")
+ (match_operand:VNx4SI 2 "register_operand")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_w")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
- "@
- ldff1w\t%0.s, %5/z, [%2.s]
- ldff1w\t%0.s, %5/z, [%2.s, #%1]
- ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw]
- ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw]
- ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
- ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5 ]
+ [w, Z, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s]
+ [w, vgw, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s, #%1]
+ [w, rk, w, Z, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw]
+ [w, rk, w, Ui1, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw]
+ [w, rk, w, Z, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
+ [w, rk, w, Ui1, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
+ }
)
;; Predicated first-faulting gather loads for 64-bit elements. The value
;; of operand 3 doesn't matter in this case.
(define_insn "@aarch64_ldff1_gather<mode>"
- [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w")
+ [(set (match_operand:SVE_FULL_D 0 "register_operand")
(unspec:SVE_FULL_D
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk")
- (match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_d")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_d")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
- "@
- ldff1d\t%0.d, %5/z, [%2.d]
- ldff1d\t%0.d, %5/z, [%2.d, #%1]
- ldff1d\t%0.d, %5/z, [%1, %2.d]
- ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, Z, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d]
+ [w, vgd, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d, #%1]
+ [w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d]
+ [w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]
+ }
)
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather<mode>_sxtw"
- [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_FULL_D 0 "register_operand")
(unspec:SVE_FULL_D
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "register_operand")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 2 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 2 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_d")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
- "@
- ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw]
- ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw]
+ [w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1770,22 +1783,23 @@
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn "*aarch64_ldff1_gather<mode>_uxtw"
- [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_FULL_D 0 "register_operand")
(unspec:SVE_FULL_D
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "register_operand" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "register_operand")
(and:VNx2DI
- (match_operand:VNx2DI 2 "register_operand" "w, w")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_d")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
- "@
- ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
- ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
+ [w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
+ }
)
;; -------------------------------------------------------------------------
@@ -1803,28 +1817,29 @@
;; Predicated extending first-faulting gather loads for 32-bit elements.
;; Operand 3 is true for unsigned extension and false for signed extension.
(define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>"
- [(set (match_operand:VNx4_WIDE 0 "register_operand" "=w, w, w, w, w, w")
+ [(set (match_operand:VNx4_WIDE 0 "register_operand")
(unspec:VNx4_WIDE
- [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm")
+ [(match_operand:VNx4BI 6 "general_operand")
(ANY_EXTEND:VNx4_WIDE
(unspec:VNx4_NARROW
- [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk")
- (match_operand:VNx4_WIDE 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
+ [(match_operand:VNx4BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>")
+ (match_operand:VNx4_WIDE 2 "register_operand")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s]
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1]
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
- ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5, 6]
+ [w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s]
+ [w, vg<VNx4_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1]
+ [w, rk, w, Z, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
+ [w, rk, w, Ui1, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
+ [w, rk, w, Z, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
+ [w, rk, w, Ui1, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx4BImode);
@@ -1834,26 +1849,27 @@
;; Predicated extending first-faulting gather loads for 64-bit elements.
;; The value of operand 3 doesn't matter in this case.
(define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>"
- [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w, w, w")
+ [(set (match_operand:VNx2_WIDE 0 "register_operand")
(unspec:VNx2_WIDE
- [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm")
+ [(match_operand:VNx2BI 6 "general_operand")
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk")
- (match_operand:VNx2_WIDE 2 "register_operand" "w, w, w, w")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>")
+ (match_operand:VNx2_WIDE 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d]
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1]
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d]
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5, 6]
+ [w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d]
+ [w, vg<VNx2_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1]
+ [w, rk, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d]
+ [w, rk, w, i, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
+ }
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1862,29 +1878,30 @@
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_sxtw"
- [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w")
+ [(set (match_operand:VNx2_WIDE 0 "register_operand")
(unspec:VNx2_WIDE
[(match_operand 6)
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
(unspec:VNx2DI
[(match_operand 7)
(sign_extend:VNx2DI
(truncate:VNx2SI
- (match_operand:VNx2DI 2 "register_operand" "w, w")))]
+ (match_operand:VNx2DI 2 "register_operand")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
+ [w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
+ }
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1894,26 +1911,27 @@
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_uxtw"
- [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w")
+ [(set (match_operand:VNx2_WIDE 0 "register_operand")
(unspec:VNx2_WIDE
[(match_operand 7)
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
- [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
+ [(match_operand:VNx2BI 5 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
(and:VNx2DI
- (match_operand:VNx2DI 2 "register_operand" "w, w")
+ (match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
- "@
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
- ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
+ {@ [cons: =0, 1, 2, 3, 4, 5]
+ [w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
+ [w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
+ }
"&& !CONSTANT_P (operands[7])"
{
operands[7] = CONSTM1_RTX (VNx2BImode);
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index da8a424..f84ff74 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -102,37 +102,39 @@
;; Non-extending loads.
(define_insn "@aarch64_gather_ldnt<mode>"
- [(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_FULL_SD 0 "register_operand")
(unspec:SVE_FULL_SD
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
- (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")
+ [(match_operand:<VPRED> 1 "register_operand")
+ (match_operand:DI 2 "aarch64_reg_or_zero")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
"TARGET_SVE2"
- "@
- ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
- ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]"
+ {@ [cons: =0, 1, 2, 3]
+ [w, Upl, Z, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
+ [w, Upl, r, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]
+ }
)
;; Extending loads.
(define_insn_and_rewrite "@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>"
- [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w")
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(unspec:SVE_FULL_SDI
- [(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand" "UplDnm, UplDnm")
+ [(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand")
(ANY_EXTEND:SVE_FULL_SDI
(unspec:SVE_PARTIAL_I
- [(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
- (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand" "w, w")
+ [(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand")
+ (match_operand:DI 2 "aarch64_reg_or_zero")
+ (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE2
&& (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
- "@
- ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>]
- ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2]"
+ {@ [cons: =0, 1, 2, 3, 4]
+ [w, Upl, Z, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>]
+ [w, Upl, r, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2]
+ }
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<SVE_FULL_SDI:VPRED>mode);