From 217ccab8f46ca5b260319e7b71b421daec6d11c8 Mon Sep 17 00:00:00 2001
From: Richard Sandiford <richard.sandiford@arm.com>
Date: Sat, 16 Nov 2019 11:11:47 +0000
Subject: [AArch64] Pattern-match SVE extending loads

This patch pattern-matches a partial SVE load followed by a sign or zero
extension into an extending load.  (The partial load is already an
extending load; we just don't rely on the upper bits of the elements.)

Nothing yet uses the extra LDFF1 and LDNF1 combinations, but it seemed
more consistent to provide them, since I needed to update the pattern
to use a predicated extension anyway.

2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64-sve.md
	(@aarch64_load_<ANY_EXTEND:optab><VNx8_WIDE:mode><VNx8_NARROW:mode>)
	(@aarch64_load_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>)
	(@aarch64_load_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>):
	Combine into...
	(@aarch64_load_<ANY_EXTEND:optab><SVE_HSDI:mode><SVE_PARTIAL_I:mode>):
	...this new pattern, handling extension to partial modes as well
	as full modes.  Describe the extension as a predicated rather than
	unpredicated extension.
	(@aarch64_ld<fn>f1_<ANY_EXTEND:optab><VNx8_WIDE:mode><VNx8_NARROW:mode>)
	(@aarch64_ld<fn>f1_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>)
	(@aarch64_ld<fn>f1_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>):
	Combine into...
	(@aarch64_ld<fn>f1_<ANY_EXTEND:optab><SVE_HSDI:mode><SVE_PARTIAL_I:mode>):
	...this new pattern, handling extension to partial modes as well
	as full modes.  Describe the extension as a predicated rather than
	unpredicated extension.
	* config/aarch64/aarch64-sve-builtins.cc
	(function_expander::use_contiguous_load_insn): Add an extra
	predicate for extending loads.
	* config/aarch64/aarch64.c (aarch64_extending_load_p): New function.
	(aarch64_sve_adjust_stmt_cost): Likewise.
	(aarch64_add_stmt_cost): Use aarch64_sve_adjust_stmt_cost to adjust
	the cost of SVE vector stmts.

gcc/testsuite/
	* gcc.target/aarch64/sve/load_extend_1.c: New test.
	* gcc.target/aarch64/sve/load_extend_2.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_3.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_4.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_5.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_6.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_7.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_8.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_9.c: Likewise.
	* gcc.target/aarch64/sve/load_extend_10.c: Likewise.
	* gcc.target/aarch64/sve/reduc_4.c: Add --param
	aarch64-sve-compare-costs=0.

From-SVN: r278343
---
 gcc/config/aarch64/aarch64-sve-builtins.cc |   6 +-
 gcc/config/aarch64/aarch64-sve.md          | 107 +++++++++----------------------
 gcc/config/aarch64/aarch64.c               |  46 +++++++++++++
 3 files changed, 85 insertions(+), 74 deletions(-)

(limited to 'gcc/config')

diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index 63d903d..27736b9 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -2790,7 +2790,9 @@ function_expander::use_vcond_mask_insn (insn_code icode,
 }
 
 /* Implement the call using instruction ICODE, which loads memory operand 1
-   into register operand 0 under the control of predicate operand 2.  */
+   into register operand 0 under the control of predicate operand 2.
+   Extending loads have a further predicate (operand 3) that nominally
+   controls the extension.  */
 rtx
 function_expander::use_contiguous_load_insn (insn_code icode)
 {
@@ -2799,6 +2801,8 @@ function_expander::use_contiguous_load_insn (insn_code icode)
   add_output_operand (icode);
   add_mem_operand (mem_mode, get_contiguous_base (mem_mode));
   add_input_operand (icode, args[0]);
+  if (GET_MODE_UNIT_BITSIZE (mem_mode) < type_suffix (0).element_bits)
+    add_input_operand (icode, CONSTM1_RTX (VNx16BImode));
   return generate_insn (icode);
 }
 
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 40aeb95..ce1bd58 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1189,39 +1189,22 @@
 ;; -------------------------------------------------------------------------
 
 ;; Predicated load and extend, with 8 elements per 128-bit block.
-(define_insn "@aarch64_load_<ANY_EXTEND:optab><VNx8_WIDE:mode><VNx8_NARROW:mode>"
-  [(set (match_operand:VNx8_WIDE 0 "register_operand" "=w")
-	(ANY_EXTEND:VNx8_WIDE
-	  (unspec:VNx8_NARROW
-	    [(match_operand:VNx8BI 2 "register_operand" "Upl")
-	     (match_operand:VNx8_NARROW 1 "memory_operand" "m")]
-	    UNSPEC_LD1_SVE)))]
-  "TARGET_SVE"
-  "ld1<ANY_EXTEND:s><VNx8_NARROW:Vesize>\t%0.<VNx8_WIDE:Vetype>, %2/z, %1"
-)
-
-;; Predicated load and extend, with 4 elements per 128-bit block.
-(define_insn "@aarch64_load_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>"
-  [(set (match_operand:VNx4_WIDE 0 "register_operand" "=w")
-	(ANY_EXTEND:VNx4_WIDE
-	  (unspec:VNx4_NARROW
-	    [(match_operand:VNx4BI 2 "register_operand" "Upl")
-	     (match_operand:VNx4_NARROW 1 "memory_operand" "m")]
-	    UNSPEC_LD1_SVE)))]
-  "TARGET_SVE"
-  "ld1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.<VNx4_WIDE:Vetype>, %2/z, %1"
-)
-
-;; Predicated load and extend, with 2 elements per 128-bit block.
-(define_insn "@aarch64_load_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>"
-  [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w")
-	(ANY_EXTEND:VNx2_WIDE
-	  (unspec:VNx2_NARROW
-	    [(match_operand:VNx2BI 2 "register_operand" "Upl")
-	     (match_operand:VNx2_NARROW 1 "memory_operand" "m")]
-	    UNSPEC_LD1_SVE)))]
-  "TARGET_SVE"
-  "ld1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.<VNx2_WIDE:Vetype>, %2/z, %1"
+(define_insn_and_rewrite "@aarch64_load_<ANY_EXTEND:optab><SVE_HSDI:mode><SVE_PARTIAL_I:mode>"
+  [(set (match_operand:SVE_HSDI 0 "register_operand" "=w")
+	(unspec:SVE_HSDI
+	  [(match_operand:<SVE_HSDI:VPRED> 3 "general_operand" "UplDnm")
+	   (ANY_EXTEND:SVE_HSDI
+	     (unspec:SVE_PARTIAL_I
+	       [(match_operand:<SVE_PARTIAL_I:VPRED> 2 "register_operand" "Upl")
+		(match_operand:SVE_PARTIAL_I 1 "memory_operand" "m")]
+	       UNSPEC_LD1_SVE))]
+	  UNSPEC_PRED_X))]
+  "TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
+  "ld1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vctype>, %2/z, %1"
+  "&& !CONSTANT_P (operands[3])"
+  {
+    operands[3] = CONSTM1_RTX (<SVE_HSDI:VPRED>mode);
+  }
 )
 
 ;; -------------------------------------------------------------------------
@@ -1268,46 +1251,24 @@
 ;; - LDNF1W
 ;; -------------------------------------------------------------------------
 
-;; Predicated first-faulting or non-faulting load and extend, with 8 elements
-;; per 128-bit block.
-(define_insn "@aarch64_ld<fn>f1_<ANY_EXTEND:optab><VNx8_WIDE:mode><VNx8_NARROW:mode>"
-  [(set (match_operand:VNx8_WIDE 0 "register_operand" "=w")
-	(ANY_EXTEND:VNx8_WIDE
-	  (unspec:VNx8_NARROW
-	    [(match_operand:VNx8BI 2 "register_operand" "Upl")
-	     (match_operand:VNx8_NARROW 1 "aarch64_sve_ld<fn>f1_operand" "Ut<fn>")
-	     (reg:VNx16BI FFRT_REGNUM)]
-	    SVE_LDFF1_LDNF1)))]
-  "TARGET_SVE"
-  "ld<fn>f1<ANY_EXTEND:s><VNx8_NARROW:Vesize>\t%0.<VNx8_WIDE:Vetype>, %2/z, %1"
-)
-
-;; Predicated first-faulting or non-faulting load and extend, with 4 elements
-;; per 128-bit block.
-(define_insn "@aarch64_ld<fn>f1_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>"
-  [(set (match_operand:VNx4_WIDE 0 "register_operand" "=w")
-	(ANY_EXTEND:VNx4_WIDE
-	  (unspec:VNx4_NARROW
-	    [(match_operand:VNx4BI 2 "register_operand" "Upl")
-	     (match_operand:VNx4_NARROW 1 "aarch64_sve_ld<fn>f1_operand" "Ut<fn>")
-	     (reg:VNx16BI FFRT_REGNUM)]
-	    SVE_LDFF1_LDNF1)))]
-  "TARGET_SVE"
-  "ld<fn>f1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.<VNx4_WIDE:Vetype>, %2/z, %1"
-)
-
-;; Predicated first-faulting or non-faulting load and extend, with 2 elements
-;; per 128-bit block.
-(define_insn "@aarch64_ldf1_" - [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w") - (ANY_EXTEND:VNx2_WIDE - (unspec:VNx2_NARROW - [(match_operand:VNx2BI 2 "register_operand" "Upl") - (match_operand:VNx2_NARROW 1 "aarch64_sve_ldf1_operand" "Ut") - (reg:VNx16BI FFRT_REGNUM)] - SVE_LDFF1_LDNF1)))] - "TARGET_SVE" - "ldf1\t%0., %2/z, %1" +;; Predicated first-faulting or non-faulting load and extend. +(define_insn_and_rewrite "@aarch64_ldf1_" + [(set (match_operand:SVE_HSDI 0 "register_operand" "=w") + (unspec:SVE_HSDI + [(match_operand: 3 "general_operand" "UplDnm") + (ANY_EXTEND:SVE_HSDI + (unspec:SVE_PARTIAL_I + [(match_operand: 2 "register_operand" "Upl") + (match_operand:SVE_PARTIAL_I 1 "aarch64_sve_ldf1_operand" "Ut") + (reg:VNx16BI FFRT_REGNUM)] + SVE_LDFF1_LDNF1))] + UNSPEC_PRED_X))] + "TARGET_SVE && (~ & ) == 0" + "ldf1\t%0., %2/z, %1" + "&& !CONSTANT_P (operands[3])" + { + operands[3] = CONSTM1_RTX (mode); + } ) ;; ------------------------------------------------------------------------- diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index d175e1f..305c6da 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -12879,6 +12879,49 @@ aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, } } +/* Return true if STMT_INFO extends the result of a load. */ +static bool +aarch64_extending_load_p (stmt_vec_info stmt_info) +{ + gassign *assign = dyn_cast (stmt_info->stmt); + if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign))) + return false; + + tree rhs = gimple_assign_rhs1 (stmt_info->stmt); + tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign)); + tree rhs_type = TREE_TYPE (rhs); + if (!INTEGRAL_TYPE_P (lhs_type) + || !INTEGRAL_TYPE_P (rhs_type) + || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type)) + return false; + + stmt_vec_info def_stmt_info = stmt_info->vinfo->lookup_def (rhs); + return (def_stmt_info + && STMT_VINFO_DATA_REF (def_stmt_info) + && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info))); +} + +/* STMT_COST is the cost calculated by aarch64_builtin_vectorization_cost + for STMT_INFO, which has cost kind KIND. Adjust the cost as necessary + for SVE targets. */ +static unsigned int +aarch64_sve_adjust_stmt_cost (vect_cost_for_stmt kind, stmt_vec_info stmt_info, + unsigned int stmt_cost) +{ + /* Unlike vec_promote_demote, vector_stmt conversions do not change the + vector register size or number of units. Integer promotions of this + type therefore map to SXT[BHW] or UXT[BHW]. + + Most loads have extending forms that can do the sign or zero extension + on the fly. Optimistically assume that a load followed by an extension + will fold to this form during combine, and that the extension therefore + comes for free. */ + if (kind == vector_stmt && aarch64_extending_load_p (stmt_info)) + stmt_cost = 0; + + return stmt_cost; +} + /* Implement targetm.vectorize.add_stmt_cost. */ static unsigned aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, @@ -12894,6 +12937,9 @@ aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, int stmt_cost = aarch64_builtin_vectorization_cost (kind, vectype, misalign); + if (stmt_info && vectype && aarch64_sve_mode_p (TYPE_MODE (vectype))) + stmt_cost = aarch64_sve_adjust_stmt_cost (kind, stmt_info, stmt_cost); + /* Statements in an inner loop relative to the loop being vectorized are weighted more heavily. The value here is arbitrary and could potentially be improved with analysis. */ -- cgit v1.1