author     Tamar Christina <tamar.christina@arm.com>  2023-11-09 14:02:21 +0000
committer  Tamar Christina <tamar.christina@arm.com>  2023-11-09 14:06:07 +0000
commit     2ea13fb9c0b56e9b8c0425d101cf81437a5200cf (patch)
tree       b08b0df01641cb0d615d36171aac5f07f800d70c /gcc/config
parent     f30ecd8050444fb902ab66b4600c590908861fdf (diff)
AArch64: Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154]
This adds a way to generate special sequences for the creation of constants for
which we don't have single-instruction sequences and which would otherwise have
led to a GP -> FP transfer or a literal load.

The patch starts out by adding support for creating 1 << 63 using fneg (mov 0).

gcc/ChangeLog:

        PR tree-optimization/109154
        * config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p,
        aarch64_maybe_generate_simd_constant): New.
        * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>,
        *aarch64_simd_mov<VDMOV:mode>): Add new codegen for special constants.
        * config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
        Take optional mode.
        (aarch64_simd_special_constant_p,
        aarch64_maybe_generate_simd_constant): New.
        * config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
        special constants.
        * config/aarch64/constraints.md (Dx): New.

gcc/testsuite/ChangeLog:

        PR tree-optimization/109154
        * gcc.target/aarch64/fneg-abs_1.c: Updated.
        * gcc.target/aarch64/fneg-abs_2.c: Updated.
        * gcc.target/aarch64/fneg-abs_4.c: Updated.
        * gcc.target/aarch64/dbl_mov_immediate_1.c: Updated.
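For context, a minimal sketch of the kind of source this targets and the
expected difference in code generation; the function name and the exact
assembly sequences below are illustrative, not taken from the patch's
testsuite:

    /* -fabs (x) only needs the sign bit, i.e. the DImode constant 1 << 63,
       OR'd into the value on the FP/SIMD side.  */
    double
    negabs (double x)
    {
      return -__builtin_fabs (x);
    }

    /* Previously the constant was typically built on the general-register
       side and transferred, roughly:

           mov   x1, 0x8000000000000000
           fmov  d31, x1
           orr   v0.8b, v0.8b, v31.8b

       With the new Dx alternative the FP side can create it directly,
       avoiding the GP -> FP transfer:

           movi  v31.4s, 0
           fneg  v31.2d, v31.2d
           orr   v0.8b, v0.8b, v31.8b  */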
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/aarch64/aarch64-protos.h |  2
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md  | 56
-rw-r--r--  gcc/config/aarch64/aarch64.cc       | 51
-rw-r--r--  gcc/config/aarch64/aarch64.md       | 16
-rw-r--r--  gcc/config/aarch64/constraints.md   |  8
5 files changed, 108 insertions(+), 25 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 60a55f4..36d6c68 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -831,6 +831,8 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
enum simd_immediate_check w = AARCH64_CHECK_MOV);
rtx aarch64_check_zero_based_sve_index_immediate (rtx);
+bool aarch64_maybe_generate_simd_constant (rtx, rtx, machine_mode);
+bool aarch64_simd_special_constant_p (rtx, machine_mode);
bool aarch64_sve_index_immediate_p (rtx);
bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 81ff5ba..33eceb4 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -142,26 +142,35 @@
[(set_attr "type" "neon_dup<q>")]
)
-(define_insn "*aarch64_simd_mov<VDMOV:mode>"
+(define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
[(set (match_operand:VDMOV 0 "nonimmediate_operand")
(match_operand:VDMOV 1 "general_operand"))]
"TARGET_FLOAT
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
- {@ [cons: =0, 1; attrs: type, arch]
- [w , m ; neon_load1_1reg<q> , * ] ldr\t%d0, %1
- [r , m ; load_8 , * ] ldr\t%x0, %1
- [m , Dz; store_8 , * ] str\txzr, %0
- [m , w ; neon_store1_1reg<q>, * ] str\t%d1, %0
- [m , r ; store_8 , * ] str\t%x1, %0
- [w , w ; neon_logic<q> , simd] mov\t%0.<Vbtype>, %1.<Vbtype>
- [w , w ; neon_logic<q> , * ] fmov\t%d0, %d1
- [?r, w ; neon_to_gp<q> , simd] umov\t%0, %1.d[0]
- [?r, w ; neon_to_gp<q> , * ] fmov\t%x0, %d1
- [?w, r ; f_mcr , * ] fmov\t%d0, %1
- [?r, r ; mov_reg , * ] mov\t%0, %1
- [w , Dn; neon_move<q> , simd] << aarch64_output_simd_mov_immediate (operands[1], 64);
- [w , Dz; f_mcr , * ] fmov\t%d0, xzr
+ {@ [cons: =0, 1; attrs: type, arch, length]
+ [w , m ; neon_load1_1reg<q> , * , *] ldr\t%d0, %1
+ [r , m ; load_8 , * , *] ldr\t%x0, %1
+ [m , Dz; store_8 , * , *] str\txzr, %0
+ [m , w ; neon_store1_1reg<q>, * , *] str\t%d1, %0
+ [m , r ; store_8 , * , *] str\t%x1, %0
+ [w , w ; neon_logic<q> , simd, *] mov\t%0.<Vbtype>, %1.<Vbtype>
+ [w , w ; neon_logic<q> , * , *] fmov\t%d0, %d1
+ [?r, w ; neon_to_gp<q> , simd, *] umov\t%0, %1.d[0]
+ [?r, w ; neon_to_gp<q> , * , *] fmov\t%x0, %d1
+ [?w, r ; f_mcr , * , *] fmov\t%d0, %1
+ [?r, r ; mov_reg , * , *] mov\t%0, %1
+ [w , Dn; neon_move<q> , simd, *] << aarch64_output_simd_mov_immediate (operands[1], 64);
+ [w , Dz; f_mcr , * , *] fmov\t%d0, xzr
+ [w , Dx; neon_move , simd, 8] #
+ }
+ "CONST_INT_P (operands[1])
+ && aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+ && FP_REGNUM_P (REGNO (operands[0]))"
+ [(const_int 0)]
+ {
+ aarch64_maybe_generate_simd_constant (operands[0], operands[1], <MODE>mode);
+ DONE;
}
)
@@ -181,19 +190,30 @@
[?r , r ; multiple , * , 8] #
[w , Dn; neon_move<q> , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
[w , Dz; fmov , * , 4] fmov\t%d0, xzr
+ [w , Dx; neon_move , simd, 8] #
}
"&& reload_completed
- && (REG_P (operands[0])
+ && ((REG_P (operands[0])
&& REG_P (operands[1])
&& !(FP_REGNUM_P (REGNO (operands[0]))
- && FP_REGNUM_P (REGNO (operands[1]))))"
+ && FP_REGNUM_P (REGNO (operands[1]))))
+ || (aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+ && FP_REGNUM_P (REGNO (operands[0]))))"
[(const_int 0)]
{
if (GP_REGNUM_P (REGNO (operands[0]))
&& GP_REGNUM_P (REGNO (operands[1])))
aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
else
- aarch64_split_simd_move (operands[0], operands[1]);
+ {
+ if (FP_REGNUM_P (REGNO (operands[0]))
+ && <MODE>mode == V2DImode
+ && aarch64_maybe_generate_simd_constant (operands[0], operands[1],
+ <MODE>mode))
+ ;
+ else
+ aarch64_split_simd_move (operands[0], operands[1]);
+ }
DONE;
}
)
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 968a9ac..800a8b0 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11894,16 +11894,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
/* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
duplicate of such constants. If so, store in RET_WI the wide_int
representation of the constant paired with the inner mode of the vector mode
- or TImode for scalar X constants. */
+ or MODE for scalar X constants. If MODE is not provided then TImode is
+ used. */
static bool
-aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
+aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
+ scalar_mode mode = TImode)
{
rtx elt = unwrap_const_vec_duplicate (x);
if (!CONST_SCALAR_INT_P (elt))
return false;
scalar_mode smode
- = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
+ = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
*ret_wi = rtx_mode_t (elt, smode);
return true;
}
@@ -11952,6 +11954,49 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
&& IN_RANGE (INTVAL (elt), minval, maxval));
}
+/* Some constants can't be made using normal mov instructions in Advanced SIMD
+   but we can still create them in various ways.  If the constant in VAL can be
+   created using such alternate methods, return true, and additionally emit the
+   sequence into TARGET if TARGET is not NULL.  Otherwise return false.  */
+
+bool
+aarch64_maybe_generate_simd_constant (rtx target, rtx val, machine_mode mode)
+{
+ wide_int wval;
+ auto smode = GET_MODE_INNER (mode);
+ if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
+ return false;
+
+ /* For Advanced SIMD we can create an integer with only the top bit set
+ using fneg (0.0f). */
+ if (TARGET_SIMD
+ && !TARGET_SVE
+ && smode == DImode
+ && wi::only_sign_bit_p (wval))
+ {
+ if (!target)
+ return true;
+
+ /* Use the same base type as aarch64_gen_shareable_zero. */
+ rtx zero = CONST0_RTX (V4SImode);
+ emit_move_insn (lowpart_subreg (V4SImode, target, mode), zero);
+ rtx neg = lowpart_subreg (V2DFmode, target, mode);
+ emit_insn (gen_negv2df2 (neg, copy_rtx (neg)));
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if the value in VAL with mode MODE can be created using special
+ instruction sequences. */
+
+bool
+aarch64_simd_special_constant_p (rtx val, machine_mode mode)
+{
+ return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
+}
+
bool
aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
{
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index bcf4bc8..5d1e0f8 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1341,13 +1341,21 @@
[r, w ; f_mrc , fp , 4] fmov\t%x0, %d1
[w, w ; fmov , fp , 4] fmov\t%d0, %d1
[w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
- }
- "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
- && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
+ [w, Dx ; neon_move, simd, 8] #
+ }
+ "CONST_INT_P (operands[1])
+ && REG_P (operands[0])
+ && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
+ && GP_REGNUM_P (REGNO (operands[0])))
+ || (aarch64_simd_special_constant_p (operands[1], DImode)
+ && FP_REGNUM_P (REGNO (operands[0]))))"
[(const_int 0)]
{
+ if (GP_REGNUM_P (REGNO (operands[0])))
aarch64_expand_mov_immediate (operands[0], operands[1]);
- DONE;
+ else
+ aarch64_maybe_generate_simd_constant (operands[0], operands[1], DImode);
+ DONE;
}
)
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 371a008..b3922bc 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -488,6 +488,14 @@
(and (match_code "const,const_vector")
(match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
false)")))
+
+(define_constraint "Dx"
+ "@internal
+ A constraint that matches a vector of 64-bit immediates which we don't have a
+ single instruction to create but that we can create in creative ways."
+ (and (match_code "const_int,const,const_vector")
+ (match_test "aarch64_simd_special_constant_p (op, DImode)")))
+
(define_constraint "Dz"
"@internal
A constraint that matches a vector of immediate zero."