author | Kyrylo Tkachov <kyrylo.tkachov@arm.com> | 2023-06-21 12:03:22 +0100 |
---|---|---|
committer | Kyrylo Tkachov <kyrylo.tkachov@arm.com> | 2023-06-21 12:03:22 +0100 |
commit | bb3c69058a5fb874ea3c5c26bfb331d33d0497c3 (patch) | |
tree | 068eb434c8655b579187e521deba112954b9f646 /gcc | |
parent | b8b19729e65c79d5b9399591d19e8724b52347f9 (diff) | |
aarch64: Convert SVE gather patterns to compact syntax
This patch converts the SVE gather load patterns to the new compact
alternatives syntax that Tamar introduced. This prepares for a future
patch I want to contribute that adds more alternatives, which are much
easier to read in the compact form.
Some lines in the converted patterns now exceed 80 characters, but I think
that's unavoidable; those patterns already had overly long constraint strings.
No functional change intended.
Bootstrapped and tested on aarch64-none-linux-gnu.
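The conversion is easier to follow with a small illustration of the two
syntaxes. The sketch below uses a made-up pattern name and operands (nothing
taken from this patch or from the aarch64 backend); it only shows how the
per-operand constraint strings and the "@" template block map onto the
bracketed {@ [cons: ...] ...} rows of the compact form:

```
;; Hypothetical pattern, traditional syntax: constraints are embedded in
;; each match_operand and the output templates sit in a parallel "@" block,
;; so alternative N has to be read down one column and across two places.
(define_insn "*example_add_si_hypothetical"
  [(set (match_operand:SI 0 "register_operand" "=r,?r")
        (plus:SI (match_operand:SI 1 "register_operand" "r,0")
                 (match_operand:SI 2 "register_operand" "r,r")))]
  ""
  "@
   add\t%w0, %w1, %w2
   add\t%w0, %w1, %w2"
)

;; The same hypothetical pattern in the compact syntax used by this patch:
;; one bracketed row per alternative pairs that alternative's constraints
;; with its output template, and "^" repeats the template of the row above.
(define_insn "*example_add_si_hypothetical"
  [(set (match_operand:SI 0 "register_operand")
        (plus:SI (match_operand:SI 1 "register_operand")
                 (match_operand:SI 2 "register_operand")))]
  ""
  {@ [cons: =0, 1, 2]
     [r,  r, r] add\t%w0, %w1, %w2
     [?r, 0, r] ^
  }
)
```

When reading the converted patterns in the diff below, each bracketed row is
one alternative: its constraints for the operands listed in the [cons: ...]
header, followed by the output template (or "^" to reuse the previous one).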
gcc/ChangeLog:
* config/aarch64/aarch64-sve.md (mask_gather_load<mode><v_int_container>):
Convert to compact alternatives syntax.
(mask_gather_load<mode><v_int_container>): Likewise.
(*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked): Likewise.
(*mask_gather_load<mode><v_int_container>_sxtw): Likewise.
(*mask_gather_load<mode><v_int_container>_uxtw): Likewise.
(@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>):
Likewise.
(@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>):
Likewise.
(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
<SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked): Likewise.
(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
<SVE_2BHSI:mode>_sxtw): Likewise.
(*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
<SVE_2BHSI:mode>_uxtw): Likewise.
(@aarch64_ldff1_gather<mode>): Likewise.
(@aarch64_ldff1_gather<mode>): Likewise.
(*aarch64_ldff1_gather<mode>_sxtw): Likewise.
(*aarch64_ldff1_gather<mode>_uxtw): Likewise.
(@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode>
<VNx4_NARROW:mode>): Likewise.
(@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
<VNx2_NARROW:mode>): Likewise.
(*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
<VNx2_NARROW:mode>_sxtw): Likewise.
(*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
<VNx2_NARROW:mode>_uxtw): Likewise.
* config/aarch64/aarch64-sve2.md (@aarch64_gather_ldnt<mode>): Likewise.
(@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode>
<SVE_PARTIAL_I:mode>): Likewise.
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/config/aarch64/aarch64-sve.md | 430 |
-rw-r--r-- | gcc/config/aarch64/aarch64-sve2.md | 36 |
2 files changed, 275 insertions, 191 deletions
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md index 2de651a..da5534c 100644 --- a/gcc/config/aarch64/aarch64-sve.md +++ b/gcc/config/aarch64/aarch64-sve.md @@ -1418,64 +1418,79 @@ ;; Predicated gather loads for 32-bit elements. Operand 3 is true for ;; unsigned extension and false for signed extension. (define_insn "mask_gather_load<mode><v_int_container>" - [(set (match_operand:SVE_4 0 "register_operand" "=w, w, w, w, w, w") + [(set (match_operand:SVE_4 0 "register_operand") (unspec:SVE_4 - [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk") - (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") - (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1") - (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i") + [(match_operand:VNx4BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>") + (match_operand:VNx4SI 2 "register_operand") + (match_operand:DI 3 "const_int_operand") + (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - "@ - ld1<Vesize>\t%0.s, %5/z, [%2.s] - ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1] - ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] - ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] - ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5 ] + [&w, Z, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%2.s] + [?w, Z, 0, Ui1, Ui1, Upl] ^ + [&w, vgw, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1] + [?w, vgw, 0, Ui1, Ui1, Upl] ^ + [&w, rk, w, Z, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] + [?w, rk, 0, Z, Ui1, Upl] ^ + [&w, rk, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] + [?w, rk, 0, Ui1, Ui1, Upl] ^ + [&w, rk, w, Z, i, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + [?w, rk, 0, Z, i, Upl] ^ + [&w, rk, w, Ui1, i, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4] + [?w, rk, 0, Ui1, i, Upl] ^ + } ) ;; Predicated gather loads for 64-bit elements. The value of operand 3 ;; doesn't matter in this case. 
(define_insn "mask_gather_load<mode><v_int_container>" - [(set (match_operand:SVE_2 0 "register_operand" "=w, w, w, w") + [(set (match_operand:SVE_2 0 "register_operand") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk") - (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>") + (match_operand:VNx2DI 2 "register_operand") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - "@ - ld1<Vesize>\t%0.d, %5/z, [%2.d] - ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1] - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d] - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, Z, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%2.d] + [?w, Z, 0, i, Ui1, Upl] ^ + [&w, vgd, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1] + [?w, vgd, 0, i, Ui1, Upl] ^ + [&w, rk, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d] + [?w, rk, 0, i, Ui1, Upl] ^ + [&w, rk, w, i, i, Upl] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4] + [?w, rk, 0, i, i, Upl] ^ + } ) ;; Likewise, but with the offset being extended from 32 bits. (define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked" - [(set (match_operand:SVE_2 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2 0 "register_operand") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "register_operand" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "register_operand") (unspec:VNx2DI [(match_operand 6) (ANY_EXTEND:VNx2DI - (match_operand:VNx2SI 2 "register_operand" "w, w"))] + (match_operand:VNx2SI 2 "register_operand"))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - "@ - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw] - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1485,24 +1500,27 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; sign-extended. 
(define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_sxtw" - [(set (match_operand:SVE_2 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2 0 "register_operand") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "register_operand" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "register_operand") (unspec:VNx2DI [(match_operand 6) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand" "w, w")))] + (match_operand:VNx2DI 2 "register_operand")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - "@ - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1512,21 +1530,24 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; zero-extended. (define_insn "*mask_gather_load<mode><v_int_container>_uxtw" - [(set (match_operand:SVE_2 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2 0 "register_operand") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "register_operand" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "register_operand") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand" "w, w") + (match_operand:VNx2DI 2 "register_operand") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - "@ - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] - ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } ) ;; ------------------------------------------------------------------------- @@ -1544,27 +1565,34 @@ ;; Predicated extending gather loads for 32-bit elements. Operand 3 is ;; true for unsigned extension and false for signed extension. 
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>" - [(set (match_operand:SVE_4HSI 0 "register_operand" "=w, w, w, w, w, w") + [(set (match_operand:SVE_4HSI 0 "register_operand") (unspec:SVE_4HSI - [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm") + [(match_operand:VNx4BI 6 "general_operand") (ANY_EXTEND:SVE_4HSI (unspec:SVE_4BHI - [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>" "Z, vg<SVE_4BHI:Vesize>, rk, rk, rk, rk") - (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") - (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1") - (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i") + [(match_operand:VNx4BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>") + (match_operand:VNx4SI 2 "register_operand") + (match_operand:DI 3 "const_int_operand") + (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~<SVE_4HSI:narrower_mask> & <SVE_4BHI:self_mask>) == 0" - "@ - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s] - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1] - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5, 6] + [&w, Z, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s] + [?w, Z, 0, Ui1, Ui1, Upl, UplDnm] ^ + [&w, vg<SVE_4BHI:Vesize>, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1] + [?w, vg<SVE_4BHI:Vesize>, 0, Ui1, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Z, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] + [?w, rk, 0, Z, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] + [?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Z, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + [?w, rk, 0, Z, i, Upl, UplDnm] ^ + [&w, rk, w, Ui1, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4] + [?w, rk, 0, Ui1, i, Upl, UplDnm] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx4BImode); @@ -1574,25 +1602,30 @@ ;; Predicated extending gather loads for 64-bit elements. The value of ;; operand 3 doesn't matter in this case. 
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>" - [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w, w, w") + [(set (match_operand:SVE_2HSDI 0 "register_operand") (unspec:SVE_2HSDI - [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm") + [(match_operand:VNx2BI 6 "general_operand") (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>" "Z, vg<SVE_2BHSI:Vesize>, rk, rk") - (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>") + (match_operand:VNx2DI 2 "register_operand") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, Ui1, Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0" - "@ - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5, 6] + [&w, Z, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d] + [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ + [&w, vg<SVE_2BHSI:Vesize>, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1] + [?w, vg<SVE_2BHSI:Vesize>, 0, i, Ui1, Upl, UplDnm] ^ + [&w, rk, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d] + [?w, rk, 0, i, Ui1, Upl, UplDnm] ^ + [&w, rk, w, i, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4] + [?w, rk, 0, i, i, Upl, UplDnm] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1601,27 +1634,30 @@ ;; Likewise, but with the offset being extended from 32 bits. 
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked" - [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2HSDI 0 "register_operand") (unspec:SVE_2HSDI [(match_operand 6) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_reg_or_zero") (unspec:VNx2DI [(match_operand 7) (ANY_EXTEND2:VNx2DI - (match_operand:VNx2SI 2 "register_operand" "w, w"))] + (match_operand:VNx2SI 2 "register_operand"))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0" - "@ - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1632,28 +1668,31 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; sign-extended. (define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_sxtw" - [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2HSDI 0 "register_operand") (unspec:SVE_2HSDI [(match_operand 6) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_reg_or_zero") (unspec:VNx2DI [(match_operand 7) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand" "w, w")))] + (match_operand:VNx2DI 2 "register_operand")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0" - "@ - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1664,25 +1703,28 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; zero-extended. 
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_uxtw" - [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") + [(set (match_operand:SVE_2HSDI 0 "register_operand") (unspec:SVE_2HSDI [(match_operand 7) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_reg_or_zero") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand" "w, w") + (match_operand:VNx2DI 2 "register_operand") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0" - "@ - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] - ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& !CONSTANT_P (operands[7])" { operands[7] = CONSTM1_RTX (VNx2BImode); @@ -1700,68 +1742,83 @@ ;; Predicated first-faulting gather loads for 32-bit elements. Operand ;; 3 is true for unsigned extension and false for signed extension. (define_insn "@aarch64_ldff1_gather<mode>" - [(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w") + [(set (match_operand:SVE_FULL_S 0 "register_operand") (unspec:SVE_FULL_S - [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk") - (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") - (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1") - (match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, Ui1, i, i") + [(match_operand:VNx4BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_w") + (match_operand:VNx4SI 2 "register_operand") + (match_operand:DI 3 "const_int_operand") + (match_operand:DI 4 "aarch64_gather_scale_operand_w") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - "@ - ldff1w\t%0.s, %5/z, [%2.s] - ldff1w\t%0.s, %5/z, [%2.s, #%1] - ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw] - ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw] - ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5 ] + [&w, Z, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s] + [?w, Z, 0, i, Ui1, Upl] ^ + [&w, vgw, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s, #%1] + [?w, vgw, 0, i, Ui1, Upl] ^ + [&w, rk, w, Z, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw] + [?w, rk, 0, Z, Ui1, Upl] ^ + [&w, rk, w, Ui1, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw] + [?w, rk, 0, Ui1, Ui1, Upl] ^ + [&w, rk, w, Z, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + [?w, rk, 0, Z, i, Upl] ^ + [&w, rk, w, Ui1, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4] + [?w, rk, 0, Ui1, i, Upl] ^ + } ) ;; Predicated first-faulting gather loads for 64-bit elements. The value ;; of operand 3 doesn't matter in this case. 
(define_insn "@aarch64_ldff1_gather<mode>" - [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w") + [(set (match_operand:SVE_FULL_D 0 "register_operand") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk") - (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_d") + (match_operand:VNx2DI 2 "register_operand") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_d") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - "@ - ldff1d\t%0.d, %5/z, [%2.d] - ldff1d\t%0.d, %5/z, [%2.d, #%1] - ldff1d\t%0.d, %5/z, [%1, %2.d] - ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5 ] + [&w, Z, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d] + [?w, Z, 0, i, Ui1, Upl ] ^ + [&w, vgd, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d, #%1] + [?w, vgd, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4] + [?w, rk, 0, i, i, Upl ] ^ + } ) ;; Likewise, but with the offset being sign-extended from 32 bits. (define_insn_and_rewrite "*aarch64_ldff1_gather<mode>_sxtw" - [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w") + [(set (match_operand:SVE_FULL_D 0 "register_operand") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "register_operand" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "register_operand") (unspec:VNx2DI [(match_operand 6) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand" "w, w")))] + (match_operand:VNx2DI 2 "register_operand")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_d") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - "@ - ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw] - ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1770,22 +1827,25 @@ ;; Likewise, but with the offset being zero-extended from 32 bits. 
(define_insn "*aarch64_ldff1_gather<mode>_uxtw" - [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w") + [(set (match_operand:SVE_FULL_D 0 "register_operand") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "register_operand" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "register_operand") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand" "w, w") + (match_operand:VNx2DI 2 "register_operand") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_d") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - "@ - ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw] - ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } ) ;; ------------------------------------------------------------------------- @@ -1803,28 +1863,35 @@ ;; Predicated extending first-faulting gather loads for 32-bit elements. ;; Operand 3 is true for unsigned extension and false for signed extension. (define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>" - [(set (match_operand:VNx4_WIDE 0 "register_operand" "=w, w, w, w, w, w") + [(set (match_operand:VNx4_WIDE 0 "register_operand") (unspec:VNx4_WIDE - [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm") + [(match_operand:VNx4BI 6 "general_operand") (ANY_EXTEND:VNx4_WIDE (unspec:VNx4_NARROW - [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk") - (match_operand:VNx4_WIDE 2 "register_operand" "w, w, w, w, w, w") - (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1") - (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i") + [(match_operand:VNx4BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>") + (match_operand:VNx4_WIDE 2 "register_operand") + (match_operand:DI 3 "const_int_operand") + (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - "@ - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s] - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1] - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5, 6] + [&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s] + [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ + [&w, vg<VNx4_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1] + [?w, vg<VNx4_NARROW:Vesize>, 0, i, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Z, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw] + [?w, rk, 0, Z, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Ui1, Ui1, Upl, 
UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw] + [?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^ + [&w, rk, w, Z, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + [?w, rk, 0, Z, i, Upl, UplDnm] ^ + [&w, rk, w, Ui1, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4] + [?w, rk, 0, Ui1, i, Upl, UplDnm] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx4BImode); @@ -1834,26 +1901,31 @@ ;; Predicated extending first-faulting gather loads for 64-bit elements. ;; The value of operand 3 doesn't matter in this case. (define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>" - [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w, w, w") + [(set (match_operand:VNx2_WIDE 0 "register_operand") (unspec:VNx2_WIDE - [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm") + [(match_operand:VNx2BI 6 "general_operand") (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") - (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk") - (match_operand:VNx2_WIDE 2 "register_operand" "w, w, w, w") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>") + (match_operand:VNx2_WIDE 2 "register_operand") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - "@ - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d] - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1] - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d] - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5, 6] + [&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d] + [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ + [&w, vg<VNx2_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1] + [?w, vg<VNx2_NARROW:Vesize>, 0, i, Ui1, Upl, UplDnm] ^ + [&w, rk, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d] + [?w, rk, 0, i, Ui1, Upl, UplDnm] ^ + [&w, rk, w, i, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4] + [?w, rk, w, i, i, Upl, UplDnm] ^ + } "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1862,29 +1934,32 @@ ;; Likewise, but with the offset being sign-extended from 32 bits. 
(define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_sxtw" - [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w") + [(set (match_operand:VNx2_WIDE 0 "register_operand") (unspec:VNx2_WIDE [(match_operand 6) (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_reg_or_zero") (unspec:VNx2DI [(match_operand 7) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand" "w, w")))] + (match_operand:VNx2DI 2 "register_operand")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - "@ - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1894,26 +1969,29 @@ ;; Likewise, but with the offset being zero-extended from 32 bits. (define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_uxtw" - [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w") + [(set (match_operand:VNx2_WIDE 0 "register_operand") (unspec:VNx2_WIDE [(match_operand 7) (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") - (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") + [(match_operand:VNx2BI 5 "register_operand") + (match_operand:DI 1 "aarch64_reg_or_zero") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand" "w, w") + (match_operand:VNx2DI 2 "register_operand") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i") + (match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - "@ - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] - ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" + {@ [cons: =0, 1, 2, 3, 4, 5] + [&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw] + [?w, rk, 0, i, Ui1, Upl ] ^ + [&w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4] + [?w, rk, 0, i, i, Upl ] ^ + } "&& !CONSTANT_P (operands[7])" { operands[7] = CONSTM1_RTX (VNx2BImode); diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md index da8a424..7a77e9b 100644 --- a/gcc/config/aarch64/aarch64-sve2.md +++ b/gcc/config/aarch64/aarch64-sve2.md @@ -102,37 +102,43 @@ ;; Non-extending loads. 
(define_insn "@aarch64_gather_ldnt<mode>" - [(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w, w") + [(set (match_operand:SVE_FULL_SD 0 "register_operand") (unspec:SVE_FULL_SD - [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl") - (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r") - (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w") + [(match_operand:<VPRED> 1 "register_operand") + (match_operand:DI 2 "aarch64_reg_or_zero") + (match_operand:<V_INT_EQUIV> 3 "register_operand") (mem:BLK (scratch))] UNSPEC_LDNT1_GATHER))] "TARGET_SVE2" - "@ - ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>] - ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]" + {@ [cons: =0, 1, 2, 3] + [&w, Upl, Z, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>] + [?w, Upl, Z, 0 ] ^ + [&w, Upl, r, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2] + [?w, Upl, r, 0 ] ^ + } ) ;; Extending loads. (define_insn_and_rewrite "@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>" - [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w") + [(set (match_operand:SVE_FULL_SDI 0 "register_operand") (unspec:SVE_FULL_SDI - [(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand" "UplDnm, UplDnm") + [(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand") (ANY_EXTEND:SVE_FULL_SDI (unspec:SVE_PARTIAL_I - [(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand" "Upl, Upl") - (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r") - (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand" "w, w") + [(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand") + (match_operand:DI 2 "aarch64_reg_or_zero") + (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand") (mem:BLK (scratch))] UNSPEC_LDNT1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE2 && (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0" - "@ - ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>] - ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2]" + {@ [cons: =0, 1, 2, 3, 4] + [&w, Upl, Z, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>] + [?w, Upl, Z, 0, UplDnm] ^ + [&w, Upl, r, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2] + [?w, Upl, r, 0, UplDnm] ^ + } "&& !CONSTANT_P (operands[4])" { operands[4] = CONSTM1_RTX (<SVE_FULL_SDI:VPRED>mode); |