From 0c12fc9b2d605cf323cfdab28a972d86398e71a1 Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Wed, 20 Dec 2017 12:51:36 +0000
Subject: poly_int: rtx constants

This patch adds an rtl representation of poly_int values.
There were three possible ways of doing this:

(1) Add a new rtl code for the poly_ints themselves and store the
    coefficients as trailing wide_ints.  This would give constants like:

      (const_poly_int [c0 c1 ... cn])

    The runtime value would be:

      c0 + c1 * x1 + ... + cn * xn

(2) Like (1), but use rtxes for the coefficients.  This would give
    constants like:

      (const_poly_int [(const_int c0)
                       (const_int c1)
                       ...
                       (const_int cn)])

    although the coefficients could be const_wide_ints instead of
    const_ints where appropriate.

(3) Add a new rtl code for the polynomial indeterminates, then use
    them in const wrappers.  A constant like c0 + c1 * x1 would then
    look like:

      (const:M (plus:M (mult:M (const_param:M x1)
                               (const_int c1))
                       (const_int c0)))

There didn't seem to be that much to choose between them.  The main
advantage of (1) is that it's a more efficient representation and that
we can refer to the coefficients directly as wide_int_storage.
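As a rough sketch of how (1) is used (illustrative only, not part of
the patch; it assumes a target where NUM_POLY_INT_COEFFS == 2, and the
mode and coefficient values are arbitrary):

  /* The value 4 + 2 * x1, where x1 is a runtime parameter.  */
  poly_int64 value (4, 2);

  /* With this patch, gen_int_mode accepts a poly_int64 and returns
     a (const_poly_int [4 2]) when the value isn't a compile-time
     constant.  */
  rtx c = gen_int_mode (value, DImode);

  /* The new predicates and accessors recover the value.  */
  gcc_assert (CONST_POLY_INT_P (c));
  poly_int64 back = rtx_to_poly_int64 (c);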
2017-12-20  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* doc/rtl.texi (const_poly_int): Document.  Also document the
	rtl sharing behavior.
	* gengenrtl.c (excluded_rtx): Return true for CONST_POLY_INT.
	* rtl.h (const_poly_int_def): New struct.
	(rtx_def::u): Add a cpi field.
	(CASE_CONST_UNIQUE, CASE_CONST_ANY): Add CONST_POLY_INT.
	(CONST_POLY_INT_P, CONST_POLY_INT_COEFFS): New macros.
	(wi::rtx_to_poly_wide_ref): New typedef.
	(const_poly_int_value, wi::to_poly_wide, rtx_to_poly_int64)
	(poly_int_rtx_p): New functions.
	(trunc_int_for_mode): Declare a poly_int64 version.
	(plus_constant): Take a poly_int64 instead of a HOST_WIDE_INT.
	(immed_wide_int_const): Take a poly_wide_int_ref rather than
	a wide_int_ref.
	(strip_offset): Declare.
	(strip_offset_and_add): New function.
	* rtl.def (CONST_POLY_INT): New rtx code.
	* rtl.c (rtx_size): Handle CONST_POLY_INT.
	(shared_const_p): Use poly_int_rtx_p.
	* emit-rtl.h (gen_int_mode): Take a poly_int64 instead of a
	HOST_WIDE_INT.
	(gen_int_shift_amount): Likewise.
	* emit-rtl.c (const_poly_int_hasher): New class.
	(const_poly_int_htab): New variable.
	(init_emit_once): Initialize it when NUM_POLY_INT_COEFFS > 1.
	(const_poly_int_hasher::hash): New function.
	(const_poly_int_hasher::equal): Likewise.
	(gen_int_mode): Take a poly_int64 instead of a HOST_WIDE_INT.
	(immed_wide_int_const): Rename to...
	(immed_wide_int_const_1): ...this and make static.
	(immed_wide_int_const): New function, taking a poly_wide_int_ref
	instead of a wide_int_ref.
	(gen_int_shift_amount): Take a poly_int64 instead of a
	HOST_WIDE_INT.
	(gen_lowpart_common): Handle CONST_POLY_INT.
	* cse.c (hash_rtx_cb, equiv_constant): Likewise.
	* cselib.c (cselib_hash_rtx): Likewise.
	* dwarf2out.c (const_ok_for_output_1): Likewise.
	* expr.c (convert_modes): Likewise.
	* print-rtl.c (rtx_writer::print_rtx, print_value): Likewise.
	* rtlhash.c (add_rtx): Likewise.
	* explow.c (trunc_int_for_mode): Add a poly_int64 version.
	(plus_constant): Take a poly_int64 instead of a HOST_WIDE_INT.
	Handle existing CONST_POLY_INT rtxes.
	* expmed.h (expand_shift): Take a poly_int64 instead of a
	HOST_WIDE_INT.
	* expmed.c (expand_shift): Likewise.
	* rtlanal.c (strip_offset): New function.
	(commutative_operand_precedence): Give CONST_POLY_INT the same
	precedence as CONST_DOUBLE and put CONST_WIDE_INT between that
	and CONST_INT.
	* rtl-tests.c (const_poly_int_tests): New struct.
	(rtl_tests_c_tests): Use it.
	* simplify-rtx.c (simplify_const_unary_operation): Handle
	CONST_POLY_INT.
	(simplify_const_binary_operation): Likewise.
	(simplify_binary_operation_1): Fold additions of symbolic
	constants and CONST_POLY_INTs.
	(simplify_subreg): Handle extensions and truncations of
	CONST_POLY_INTs.
	(simplify_const_poly_int_tests): New struct.
	(simplify_rtx_c_tests): Use it.
	* wide-int.h (storage_ref): Add default constructor.
	(wide_int_ref_storage): Likewise.
	(trailing_wide_ints): Use GTY((user)).
	(trailing_wide_ints::operator[]): Add a const version.
	(trailing_wide_ints::get_precision): New function.
	(trailing_wide_ints::extra_size): Likewise.

Co-Authored-By: Alan Hayward
Co-Authored-By: David Sherwood

From-SVN: r255862
---
 gcc/simplify-rtx.c | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 145 insertions(+), 5 deletions(-)

(limited to 'gcc/simplify-rtx.c')

diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index dec6cae..58cf2c5 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -2038,6 +2038,26 @@ simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
 	}
     }
 
+  /* Handle polynomial integers.  */
+  else if (CONST_POLY_INT_P (op))
+    {
+      poly_wide_int result;
+      switch (code)
+	{
+	case NEG:
+	  result = -const_poly_int_value (op);
+	  break;
+
+	case NOT:
+	  result = ~const_poly_int_value (op);
+	  break;
+
+	default:
+	  return NULL_RTX;
+	}
+      return immed_wide_int_const (result, mode);
+    }
+
   return NULL_RTX;
 }
 
@@ -2218,6 +2238,7 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
   rtx tem, reversed, opleft, opright, elt0, elt1;
   HOST_WIDE_INT val;
   scalar_int_mode int_mode, inner_mode;
+  poly_int64 offset;
 
   /* Even if we can't compute a constant result,
      there are some cases worth simplifying.  */
@@ -2530,6 +2551,12 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
 	}
 
+      if ((GET_CODE (op0) == CONST
+	   || GET_CODE (op0) == SYMBOL_REF
+	   || GET_CODE (op0) == LABEL_REF)
+	  && poly_int_rtx_p (op1, &offset))
+	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
+
       /* Don't let a relocatable value get a negative coeff.  */
       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
 	return simplify_gen_binary (PLUS, mode,
@@ -4327,6 +4354,57 @@ simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
       return immed_wide_int_const (result, int_mode);
     }
 
+  /* Handle polynomial integers.  */
+  if (NUM_POLY_INT_COEFFS > 1
+      && is_a <scalar_int_mode> (mode, &int_mode)
+      && poly_int_rtx_p (op0)
+      && poly_int_rtx_p (op1))
+    {
+      poly_wide_int result;
+      switch (code)
+	{
+	case PLUS:
+	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
+	  break;
+
+	case MINUS:
+	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
+	  break;
+
+	case MULT:
+	  if (CONST_SCALAR_INT_P (op1))
+	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
+	  else
+	    return NULL_RTX;
+	  break;
+
+	case ASHIFT:
+	  if (CONST_SCALAR_INT_P (op1))
+	    {
+	      wide_int shift = rtx_mode_t (op1, mode);
+	      if (SHIFT_COUNT_TRUNCATED)
+		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
+	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
+		return NULL_RTX;
+	      result = wi::to_poly_wide (op0, mode) << shift;
+	    }
+	  else
+	    return NULL_RTX;
+	  break;
+
+	case IOR:
+	  if (!CONST_SCALAR_INT_P (op1)
+	      || !can_ior_p (wi::to_poly_wide (op0, mode),
+			     rtx_mode_t (op1, mode), &result))
+	    return NULL_RTX;
+	  break;
+
+	default:
+	  return NULL_RTX;
+	}
+      return immed_wide_int_const (result, int_mode);
+    }
+
   return NULL_RTX;
 }
 
@@ -6370,13 +6448,27 @@ simplify_subreg (machine_mode outermode, rtx op,
   scalar_int_mode int_outermode, int_innermode;
   if (is_a <scalar_int_mode> (outermode, &int_outermode)
       && is_a <scalar_int_mode> (innermode, &int_innermode)
-      && (GET_MODE_PRECISION (int_outermode)
-	  < GET_MODE_PRECISION (int_innermode))
       && byte == subreg_lowpart_offset (int_outermode, int_innermode))
     {
-      rtx tem = simplify_truncation (int_outermode, op, int_innermode);
-      if (tem)
-	return tem;
+      /* Handle polynomial integers.  The upper bits of a paradoxical
+	 subreg are undefined, so this is safe regardless of whether
+	 we're truncating or extending.  */
+      if (CONST_POLY_INT_P (op))
+	{
+	  poly_wide_int val
+	    = poly_wide_int::from (const_poly_int_value (op),
+				   GET_MODE_PRECISION (int_outermode),
+				   SIGNED);
+	  return immed_wide_int_const (val, int_outermode);
+	}
+
+      if (GET_MODE_PRECISION (int_outermode)
+	  < GET_MODE_PRECISION (int_innermode))
+	{
+	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
+	  if (tem)
+	    return tem;
+	}
     }
 
   return NULL_RTX;
@@ -6685,12 +6777,60 @@ test_vector_ops ()
     }
 }
 
+template<unsigned int N>
+struct simplify_const_poly_int_tests
+{
+  static void run ();
+};
+
+template<>
+struct simplify_const_poly_int_tests<1>
+{
+  static void run () {}
+};
+
+/* Test various CONST_POLY_INT properties.  */
+
+template<unsigned int N>
+void
+simplify_const_poly_int_tests<N>::run ()
+{
+  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
+  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
+  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
+  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
+  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
+  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
+  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
+  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
+  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
+  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
+  rtx two = GEN_INT (2);
+  rtx six = GEN_INT (6);
+  HOST_WIDE_INT offset = subreg_lowpart_offset (QImode, HImode);
+
+  /* These tests only try limited operation combinations.  Fuller arithmetic
+     testing is done directly on poly_ints.  */
+  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
+  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
+  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
+  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
+  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
+  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
+  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
+  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
+  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
+  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
+  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
+}
+
 /* Run all of the selftests within this file.  */
 
 void
 simplify_rtx_c_tests ()
 {
   test_vector_ops ();
+  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
 }
 
 } // namespace selftest
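A short worked example of the wrap-around behavior that the PLUS and
NOT selftests above rely on (a sketch for illustration, not part of
the patch):

  /* x1 is 1 + 1*x and x2 is -80 + 127*x, so the exact sum is
     -79 + 128*x.  The arithmetic is done per coefficient in the 8-bit
     precision of QImode, where 128 wraps to -128, giving exactly
     x3 = -79 + -128*x.  Likewise ~(30 + 24*x) = -31 + -24*x, i.e. x10.  */
  poly_int64 sum = poly_int64 (1, 1) + poly_int64 (-80, 127);  /* (-79, 128) */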