aboutsummaryrefslogtreecommitdiff
path: root/gcc/emit-rtl.c
diff options
context:
space:
mode:
authorRichard Sandiford <richard.sandiford@linaro.org>2017-12-20 12:51:36 +0000
committerRichard Sandiford <rsandifo@gcc.gnu.org>2017-12-20 12:51:36 +0000
commit0c12fc9b2d605cf323cfdab28a972d86398e71a1 (patch)
treeca58581589b8fa244d77652a9a9c06f5293a8bea /gcc/emit-rtl.c
parentabd3c800109b95f09af3b3f1a7a43d9b7631f21c (diff)
downloadgcc-0c12fc9b2d605cf323cfdab28a972d86398e71a1.zip
gcc-0c12fc9b2d605cf323cfdab28a972d86398e71a1.tar.gz
gcc-0c12fc9b2d605cf323cfdab28a972d86398e71a1.tar.bz2
poly_int: rtx constants
This patch adds an rtl representation of poly_int values. There were three possible ways of doing this: (1) Add a new rtl code for the poly_ints themselves and store the coefficients as trailing wide_ints. This would give constants like: (const_poly_int [c0 c1 ... cn]) The runtime value would be: c0 + c1 * x1 + ... + cn * xn (2) Like (1), but use rtxes for the coefficients. This would give constants like: (const_poly_int [(const_int c0) (const_int c1) ... (const_int cn)]) although the coefficients could be const_wide_ints instead of const_ints where appropriate. (3) Add a new rtl code for the polynomial indeterminates, then use them in const wrappers. A constant like c0 + c1 * x1 would then look like: (const:M (plus:M (mult:M (const_param:M x1) (const_int c1)) (const_int c0))) There didn't seem to be that much to choose between them. The main advantage of (1) is that it's a more efficient representation and that we can refer to the coefficients directly as wide_int_storage. 2017-12-20 Richard Sandiford <richard.sandiford@linaro.org> Alan Hayward <alan.hayward@arm.com> David Sherwood <david.sherwood@arm.com> gcc/ * doc/rtl.texi (const_poly_int): Document. Also document the rtl sharing behavior. * gengenrtl.c (excluded_rtx): Return true for CONST_POLY_INT. * rtl.h (const_poly_int_def): New struct. (rtx_def::u): Add a cpi field. (CASE_CONST_UNIQUE, CASE_CONST_ANY): Add CONST_POLY_INT. (CONST_POLY_INT_P, CONST_POLY_INT_COEFFS): New macros. (wi::rtx_to_poly_wide_ref): New typedef. (const_poly_int_value, wi::to_poly_wide, rtx_to_poly_int64) (poly_int_rtx_p): New functions. (trunc_int_for_mode): Declare a poly_int64 version. (plus_constant): Take a poly_int64 instead of a HOST_WIDE_INT. (immed_wide_int_const): Take a poly_wide_int_ref rather than a wide_int_ref. (strip_offset): Declare. (strip_offset_and_add): New function. * rtl.def (CONST_POLY_INT): New rtx code. * rtl.c (rtx_size): Handle CONST_POLY_INT. (shared_const_p): Use poly_int_rtx_p.
* emit-rtl.h (gen_int_mode): Take a poly_int64 instead of a HOST_WIDE_INT. (gen_int_shift_amount): Likewise. * emit-rtl.c (const_poly_int_hasher): New class. (const_poly_int_htab): New variable. (init_emit_once): Initialize it when NUM_POLY_INT_COEFFS > 1. (const_poly_int_hasher::hash): New function. (const_poly_int_hasher::equal): Likewise. (gen_int_mode): Take a poly_int64 instead of a HOST_WIDE_INT. (immed_wide_int_const): Rename to... (immed_wide_int_const_1): ...this and make static. (immed_wide_int_const): New function, taking a poly_wide_int_ref instead of a wide_int_ref. (gen_int_shift_amount): Take a poly_int64 instead of a HOST_WIDE_INT. (gen_lowpart_common): Handle CONST_POLY_INT. * cse.c (hash_rtx_cb, equiv_constant): Likewise. * cselib.c (cselib_hash_rtx): Likewise. * dwarf2out.c (const_ok_for_output_1): Likewise. * expr.c (convert_modes): Likewise. * print-rtl.c (rtx_writer::print_rtx, print_value): Likewise. * rtlhash.c (add_rtx): Likewise. * explow.c (trunc_int_for_mode): Add a poly_int64 version. (plus_constant): Take a poly_int64 instead of a HOST_WIDE_INT. Handle existing CONST_POLY_INT rtxes. * expmed.h (expand_shift): Take a poly_int64 instead of a HOST_WIDE_INT. * expmed.c (expand_shift): Likewise. * rtlanal.c (strip_offset): New function. (commutative_operand_precedence): Give CONST_POLY_INT the same precedence as CONST_DOUBLE and put CONST_WIDE_INT between that and CONST_INT. * rtl-tests.c (const_poly_int_tests): New struct. (rtl_tests_c_tests): Use it. * simplify-rtx.c (simplify_const_unary_operation): Handle CONST_POLY_INT. (simplify_const_binary_operation): Likewise. (simplify_binary_operation_1): Fold additions of symbolic constants and CONST_POLY_INTs. (simplify_subreg): Handle extensions and truncations of CONST_POLY_INTs. (simplify_const_poly_int_tests): New struct. (simplify_rtx_c_tests): Use it. * wide-int.h (storage_ref): Add default constructor. (wide_int_ref_storage): Likewise. (trailing_wide_ints): Use GTY((user)). 
(trailing_wide_ints::operator[]): Add a const version. (trailing_wide_ints::get_precision): New function. (trailing_wide_ints::extra_size): Likewise. Co-Authored-By: Alan Hayward <alan.hayward@arm.com> Co-Authored-By: David Sherwood <david.sherwood@arm.com> From-SVN: r255862
Diffstat (limited to 'gcc/emit-rtl.c')
-rw-r--r--gcc/emit-rtl.c102
1 file changed, 96 insertions, 6 deletions
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 8dab330..799b94a 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -148,6 +148,16 @@ struct const_wide_int_hasher : ggc_cache_ptr_hash<rtx_def>
static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;
+struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
+{
+ typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;
+
+ static hashval_t hash (rtx x);
+ static bool equal (rtx x, const compare_type &y);
+};
+
+static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;
+
/* A hash table storing register attribute structures. */
struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
{
@@ -248,6 +258,31 @@ const_wide_int_hasher::equal (rtx x, rtx y)
}
#endif
+/* Returns a hash code for CONST_POLY_INT X. */
+
+hashval_t
+const_poly_int_hasher::hash (rtx x)
+{
+ inchash::hash h;
+ h.add_int (GET_MODE (x));
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
+ return h.end ();
+}
+
+/* Returns nonzero if CONST_POLY_INT X is an rtx representation of Y. */
+
+bool
+const_poly_int_hasher::equal (rtx x, const compare_type &y)
+{
+ if (GET_MODE (x) != y.first)
+ return false;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
+ return false;
+ return true;
+}
+
/* Returns a hash code for X (which is really a CONST_DOUBLE). */
hashval_t
const_double_hasher::hash (rtx x)
@@ -489,9 +524,13 @@ gen_rtx_CONST_INT (machine_mode mode ATTRIBUTE_UNUSED, HOST_WIDE_INT arg)
}
rtx
-gen_int_mode (HOST_WIDE_INT c, machine_mode mode)
+gen_int_mode (poly_int64 c, machine_mode mode)
{
- return GEN_INT (trunc_int_for_mode (c, mode));
+ c = trunc_int_for_mode (c, mode);
+ if (c.is_constant ())
+ return GEN_INT (c.coeffs[0]);
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+ return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
}
/* CONST_DOUBLEs might be created from pairs of integers, or from
@@ -595,8 +634,8 @@ lookup_const_wide_int (rtx wint)
a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
(if TARGET_SUPPORTS_WIDE_INT). */
-rtx
-immed_wide_int_const (const wide_int_ref &v, machine_mode mode)
+static rtx
+immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
{
unsigned int len = v.get_len ();
/* Not scalar_int_mode because we also allow pointer bound modes. */
@@ -683,6 +722,53 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, machine_mode mode)
}
#endif
+/* Return an rtx representation of C in mode MODE. */
+
+rtx
+immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
+{
+ if (c.is_constant ())
+ return immed_wide_int_const_1 (c.coeffs[0], mode);
+
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+
+ /* Allow truncation but not extension since we do not know if the
+ number is signed or unsigned. */
+ gcc_assert (prec <= c.coeffs[0].get_precision ());
+ poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);
+
+ /* See whether we already have an rtx for this constant. */
+ inchash::hash h;
+ h.add_int (mode);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (newc.coeffs[i]);
+ const_poly_int_hasher::compare_type typed_value (mode, newc);
+ rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
+ h.end (), INSERT);
+ rtx x = *slot;
+ if (x)
+ return x;
+
+ /* Create a new rtx. There's a choice to be made here between installing
+ the actual mode of the rtx or leaving it as VOIDmode (for consistency
+ with CONST_INT). In practice the handling of the codes is different
+ enough that we get no benefit from using VOIDmode, and various places
+ assume that VOIDmode implies CONST_INT. Using the real mode seems like
+ the right long-term direction anyway. */
+ typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
+ size_t extra_size = twi::extra_size (prec);
+ x = rtx_alloc_v (CONST_POLY_INT,
+ sizeof (struct const_poly_int_def) + extra_size);
+ PUT_MODE (x, mode);
+ CONST_POLY_INT_COEFFS (x).set_precision (prec);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];
+
+ *slot = x;
+ return x;
+}
+
rtx
gen_rtx_REG (machine_mode mode, unsigned int regno)
{
@@ -1486,7 +1572,8 @@ gen_lowpart_common (machine_mode mode, rtx x)
}
else if (GET_CODE (x) == SUBREG || REG_P (x)
|| GET_CODE (x) == CONCAT || const_vec_p (x)
- || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x))
+ || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
+ || CONST_POLY_INT_P (x))
return lowpart_subreg (mode, x, innermode);
/* Otherwise, we can't do this. */
@@ -6031,6 +6118,9 @@ init_emit_once (void)
#endif
const_double_htab = hash_table<const_double_hasher>::create_ggc (37);
+ if (NUM_POLY_INT_COEFFS > 1)
+ const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);
+
const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);
reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);
@@ -6422,7 +6512,7 @@ need_atomic_barrier_p (enum memmodel model, bool pre)
by VALUE bits. */
rtx
-gen_int_shift_amount (machine_mode, HOST_WIDE_INT value)
+gen_int_shift_amount (machine_mode, poly_int64 value)
{
/* Use a 64-bit mode, to avoid any truncation.