about summary refs log tree commit diff
path: root/gcc/recog.c
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/recog.c')
-rw-r--r--  gcc/recog.c  500
1 files changed, 487 insertions, 13 deletions
diff --git a/gcc/recog.c b/gcc/recog.c
index d3552ec..e9aa1ba 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -183,6 +183,7 @@ struct change_t
{
rtx object;
int old_code;
+ int old_len;
bool unshare;
rtx *loc;
rtx old;
@@ -192,10 +193,13 @@ static change_t *changes;
static int changes_allocated;
static int num_changes = 0;
+static int temporarily_undone_changes = 0;
/* Validate a proposed change to OBJECT. LOC is the location in the rtl
- at which NEW_RTX will be placed. If OBJECT is zero, no validation is done,
- the change is simply made.
+ at which NEW_RTX will be placed. If NEW_LEN is >= 0, XVECLEN (NEW_RTX, 0)
+ will also be changed to NEW_LEN, which is no greater than the current
+ XVECLEN. If OBJECT is zero, no validation is done, the change is
+ simply made.
Two types of objects are supported: If OBJECT is a MEM, memory_address_p
will be called with the address and mode as parameters. If OBJECT is
@@ -212,14 +216,26 @@ static int num_changes = 0;
Otherwise, perform the change and return 1. */
static bool
-validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group, bool unshare)
+validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group,
+ bool unshare, int new_len = -1)
{
+ gcc_assert (temporarily_undone_changes == 0);
rtx old = *loc;
- if (old == new_rtx || rtx_equal_p (old, new_rtx))
+ /* Single-element parallels aren't valid and won't match anything.
+ Replace them with the single element. */
+ if (new_len == 1 && GET_CODE (new_rtx) == PARALLEL)
+ {
+ new_rtx = XVECEXP (new_rtx, 0, 0);
+ new_len = -1;
+ }
+
+ if ((old == new_rtx || rtx_equal_p (old, new_rtx))
+ && (new_len < 0 || XVECLEN (new_rtx, 0) == new_len))
return 1;
- gcc_assert (in_group != 0 || num_changes == 0);
+ gcc_assert ((in_group != 0 || num_changes == 0)
+ && (new_len < 0 || new_rtx == *loc));
*loc = new_rtx;
@@ -239,8 +255,12 @@ validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group, bool unshar
changes[num_changes].object = object;
changes[num_changes].loc = loc;
changes[num_changes].old = old;
+ changes[num_changes].old_len = (new_len >= 0 ? XVECLEN (new_rtx, 0) : -1);
changes[num_changes].unshare = unshare;
+ if (new_len >= 0)
+ XVECLEN (new_rtx, 0) = new_len;
+
if (object && !MEM_P (object))
{
/* Set INSN_CODE to force rerecognition of insn. Save old code in
@@ -278,6 +298,14 @@ validate_unshare_change (rtx object, rtx *loc, rtx new_rtx, bool in_group)
return validate_change_1 (object, loc, new_rtx, in_group, true);
}
+/* Change XVECLEN (*LOC, 0) to NEW_LEN. OBJECT, IN_GROUP and the return
+ value are as for validate_change_1. */
+
+bool
+validate_change_xveclen (rtx object, rtx *loc, int new_len, bool in_group)
+{
+ return validate_change_1 (object, loc, *loc, in_group, false, new_len);
+}
/* Keep X canonicalized if some changes have made it non-canonical; only
modifies the operands of X, not (for example) its code. Simplifications
@@ -408,10 +436,7 @@ verify_changes (int num)
changes[i].old
&& REG_P (changes[i].old)
&& asm_noperands (PATTERN (object)) > 0
- && REG_EXPR (changes[i].old) != NULL_TREE
- && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (changes[i].old))
- && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (changes[i].old))
- && DECL_REGISTER (REG_EXPR (changes[i].old)))
+ && register_asm_p (changes[i].old))
{
/* Don't allow changes of hard register operands to inline
assemblies if they have been defined as register asm ("x"). */
@@ -483,6 +508,7 @@ confirm_change_group (void)
int i;
rtx last_object = NULL;
+ gcc_assert (temporarily_undone_changes == 0);
for (i = 0; i < num_changes; i++)
{
rtx object = changes[i].object;
@@ -538,19 +564,67 @@ num_validated_changes (void)
void
cancel_changes (int num)
{
+ gcc_assert (temporarily_undone_changes == 0);
int i;
/* Back out all the changes. Do this in the opposite order in which
they were made. */
for (i = num_changes - 1; i >= num; i--)
{
- *changes[i].loc = changes[i].old;
+ if (changes[i].old_len >= 0)
+ XVECLEN (*changes[i].loc, 0) = changes[i].old_len;
+ else
+ *changes[i].loc = changes[i].old;
if (changes[i].object && !MEM_P (changes[i].object))
INSN_CODE (changes[i].object) = changes[i].old_code;
}
num_changes = num;
}
+/* Swap the status of change NUM from being applied to not being applied,
+ or vice versa. */
+
+static void
+swap_change (int num)
+{
+ if (changes[num].old_len >= 0)
+ std::swap (XVECLEN (*changes[num].loc, 0), changes[num].old_len);
+ else
+ std::swap (*changes[num].loc, changes[num].old);
+ if (changes[num].object && !MEM_P (changes[num].object))
+ std::swap (INSN_CODE (changes[num].object), changes[num].old_code);
+}
+
+/* Temporarily undo all the changes numbered NUM and up, with a view
+ to reapplying them later. The next call to the changes machinery
+ must be:
+
+ redo_changes (NUM)
+
+ otherwise things will end up in an invalid state. */
+
+void
+temporarily_undo_changes (int num)
+{
+ gcc_assert (temporarily_undone_changes == 0 && num <= num_changes);
+ for (int i = num_changes - 1; i >= num; i--)
+ swap_change (i);
+ temporarily_undone_changes = num_changes - num;
+}
+
+/* Redo the changes that were temporarily undone by:
+
+ temporarily_undo_changes (NUM). */
+
+void
+redo_changes (int num)
+{
+ gcc_assert (temporarily_undone_changes == num_changes - num);
+ for (int i = num; i < num_changes; ++i)
+ swap_change (i);
+ temporarily_undone_changes = 0;
+}
+
/* Reduce conditional compilation elsewhere. */
/* A subroutine of validate_replace_rtx_1 that tries to simplify the resulting
rtx. */
@@ -922,7 +996,403 @@ validate_simplify_insn (rtx_insn *insn)
}
return ((num_changes_pending () > 0) && (apply_change_group () > 0));
}
-
+
+/* Try to process the address of memory expression MEM. Return true on
+ success; leave the caller to clean up on failure. */
+
+bool
+insn_propagation::apply_to_mem_1 (rtx mem)
+{
+ auto old_num_changes = num_validated_changes ();
+ mem_depth += 1;
+ bool res = apply_to_rvalue_1 (&XEXP (mem, 0));
+ mem_depth -= 1;
+ if (!res)
+ return false;
+
+ if (old_num_changes != num_validated_changes ()
+ && should_check_mems
+ && !check_mem (old_num_changes, mem))
+ return false;
+
+ return true;
+}
+
+/* Try to process the rvalue expression at *LOC. Return true on success;
+ leave the caller to clean up on failure. */
+
+bool
+insn_propagation::apply_to_rvalue_1 (rtx *loc)
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ machine_mode mode = GET_MODE (x);
+
+ auto old_num_changes = num_validated_changes ();
+ if (from && GET_CODE (x) == GET_CODE (from) && rtx_equal_p (x, from))
+ {
+ /* Don't replace register asms in asm statements; we mustn't
+ change the user's register allocation. */
+ if (REG_P (x)
+ && HARD_REGISTER_P (x)
+ && register_asm_p (x)
+ && asm_noperands (PATTERN (insn)) > 0)
+ return false;
+
+ if (should_unshare)
+ validate_unshare_change (insn, loc, to, 1);
+ else
+ validate_change (insn, loc, to, 1);
+ if (mem_depth && !REG_P (to) && !CONSTANT_P (to))
+ {
+ /* We're substituting into an address, but TO will have the
+ form expected outside an address. Canonicalize it if
+ necessary. */
+ insn_propagation subprop (insn);
+ subprop.mem_depth += 1;
+ if (!subprop.apply_to_rvalue (loc))
+ gcc_unreachable ();
+ if (should_unshare
+ && num_validated_changes () != old_num_changes + 1)
+ {
+ /* TO is owned by someone else, so create a copy and
+ return TO to its original form. */
+ rtx to = copy_rtx (*loc);
+ cancel_changes (old_num_changes);
+ validate_change (insn, loc, to, 1);
+ }
+ }
+ num_replacements += 1;
+ should_unshare = true;
+ result_flags |= UNSIMPLIFIED;
+ return true;
+ }
+
+ /* Recursively apply the substitution and see if we can simplify
+ the result. This specifically shouldn't use simplify_gen_* for
+ speculative simplifications, since we want to avoid generating new
+ expressions where possible. */
+ auto old_result_flags = result_flags;
+ rtx newx = NULL_RTX;
+ bool recurse_p = false;
+ switch (GET_RTX_CLASS (code))
+ {
+ case RTX_UNARY:
+ {
+ machine_mode op0_mode = GET_MODE (XEXP (x, 0));
+ if (!apply_to_rvalue_1 (&XEXP (x, 0)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ newx = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
+ break;
+ }
+
+ case RTX_BIN_ARITH:
+ case RTX_COMM_ARITH:
+ {
+ if (!apply_to_rvalue_1 (&XEXP (x, 0))
+ || !apply_to_rvalue_1 (&XEXP (x, 1)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
+ newx = simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
+ else
+ newx = simplify_binary_operation (code, mode,
+ XEXP (x, 0), XEXP (x, 1));
+ break;
+ }
+
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
+ {
+ machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
+ ? GET_MODE (XEXP (x, 0))
+ : GET_MODE (XEXP (x, 1)));
+ if (!apply_to_rvalue_1 (&XEXP (x, 0))
+ || !apply_to_rvalue_1 (&XEXP (x, 1)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ newx = simplify_relational_operation (code, mode, op_mode,
+ XEXP (x, 0), XEXP (x, 1));
+ break;
+ }
+
+ case RTX_TERNARY:
+ case RTX_BITFIELD_OPS:
+ {
+ machine_mode op0_mode = GET_MODE (XEXP (x, 0));
+ if (!apply_to_rvalue_1 (&XEXP (x, 0))
+ || !apply_to_rvalue_1 (&XEXP (x, 1))
+ || !apply_to_rvalue_1 (&XEXP (x, 2)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ newx = simplify_ternary_operation (code, mode, op0_mode,
+ XEXP (x, 0), XEXP (x, 1),
+ XEXP (x, 2));
+ break;
+ }
+
+ case RTX_EXTRA:
+ if (code == SUBREG)
+ {
+ machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
+ if (!apply_to_rvalue_1 (&SUBREG_REG (x)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ rtx inner = SUBREG_REG (x);
+ newx = simplify_subreg (mode, inner, inner_mode, SUBREG_BYTE (x));
+ /* Reject the same cases that simplify_gen_subreg would. */
+ if (!newx
+ && (GET_CODE (inner) == SUBREG
+ || GET_CODE (inner) == CONCAT
+ || GET_MODE (inner) == VOIDmode
+ || !validate_subreg (mode, inner_mode,
+ inner, SUBREG_BYTE (x))))
+ {
+ failure_reason = "would create an invalid subreg";
+ return false;
+ }
+ break;
+ }
+ else
+ recurse_p = true;
+ break;
+
+ case RTX_OBJ:
+ if (code == LO_SUM)
+ {
+ if (!apply_to_rvalue_1 (&XEXP (x, 0))
+ || !apply_to_rvalue_1 (&XEXP (x, 1)))
+ return false;
+ if (from && old_num_changes == num_validated_changes ())
+ return true;
+
+ /* (lo_sum (high x) y) -> y where x and y have the same base. */
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ if (GET_CODE (op0) == HIGH)
+ {
+ rtx base0, base1, offset0, offset1;
+ split_const (XEXP (op0, 0), &base0, &offset0);
+ split_const (op1, &base1, &offset1);
+ if (rtx_equal_p (base0, base1))
+ newx = op1;
+ }
+ }
+ else if (code == REG)
+ {
+ if (from && REG_P (from) && reg_overlap_mentioned_p (x, from))
+ {
+ failure_reason = "inexact register overlap";
+ return false;
+ }
+ }
+ else if (code == MEM)
+ return apply_to_mem_1 (x);
+ else
+ recurse_p = true;
+ break;
+
+ case RTX_CONST_OBJ:
+ break;
+
+ case RTX_AUTOINC:
+ if (from && reg_overlap_mentioned_p (XEXP (x, 0), from))
+ {
+ failure_reason = "is subject to autoinc";
+ return false;
+ }
+ recurse_p = true;
+ break;
+
+ case RTX_MATCH:
+ case RTX_INSN:
+ gcc_unreachable ();
+ }
+
+ if (recurse_p)
+ {
+ const char *fmt = GET_RTX_FORMAT (code);
+ for (int i = 0; fmt[i]; i++)
+ switch (fmt[i])
+ {
+ case 'E':
+ for (int j = 0; j < XVECLEN (x, i); j++)
+ if (!apply_to_rvalue_1 (&XVECEXP (x, i, j)))
+ return false;
+ break;
+
+ case 'e':
+ if (XEXP (x, i) && !apply_to_rvalue_1 (&XEXP (x, i)))
+ return false;
+ break;
+ }
+ }
+ else if (newx && !rtx_equal_p (x, newx))
+ {
+ /* All substitutions made by OLD_NUM_CHANGES onwards have been
+ simplified. */
+ result_flags = ((result_flags & ~UNSIMPLIFIED)
+ | (old_result_flags & UNSIMPLIFIED));
+
+ if (should_note_simplifications)
+ note_simplification (old_num_changes, old_result_flags, x, newx);
+
+ /* There's no longer any point unsharing the substitutions made
+ for subexpressions, since we'll just copy this one instead. */
+ bool unshare = false;
+ for (int i = old_num_changes; i < num_changes; ++i)
+ {
+ unshare |= changes[i].unshare;
+ changes[i].unshare = false;
+ }
+ if (unshare)
+ validate_unshare_change (insn, loc, newx, 1);
+ else
+ validate_change (insn, loc, newx, 1);
+ }
+
+ return true;
+}
+
+/* Try to process the lvalue expression at *LOC. Return true on success;
+ leave the caller to clean up on failure. */
+
+bool
+insn_propagation::apply_to_lvalue_1 (rtx dest)
+{
+ rtx old_dest = dest;
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ {
+ if (GET_CODE (dest) == ZERO_EXTRACT
+ && (!apply_to_rvalue_1 (&XEXP (dest, 1))
+ || !apply_to_rvalue_1 (&XEXP (dest, 2))))
+ return false;
+ dest = XEXP (dest, 0);
+ }
+
+ if (MEM_P (dest))
+ return apply_to_mem_1 (dest);
+
+ /* Check whether the substitution is safe in the presence of this lvalue. */
+ if (!from
+ || dest == old_dest
+ || !REG_P (dest)
+ || !reg_overlap_mentioned_p (dest, from))
+ return true;
+
+ if (SUBREG_P (old_dest)
+ && SUBREG_REG (old_dest) == dest
+ && !read_modify_subreg_p (old_dest))
+ return true;
+
+ failure_reason = "is part of a read-write destination";
+ return false;
+}
+
+/* Try to process the instruction pattern at *LOC. Return true on success;
+ leave the caller to clean up on failure. */
+
+bool
+insn_propagation::apply_to_pattern_1 (rtx *loc)
+{
+ rtx body = *loc;
+ switch (GET_CODE (body))
+ {
+ case COND_EXEC:
+ return (apply_to_rvalue_1 (&COND_EXEC_TEST (body))
+ && apply_to_pattern_1 (&COND_EXEC_CODE (body)));
+
+ case PARALLEL:
+ {
+ int last = XVECLEN (body, 0) - 1;
+ for (int i = 0; i < last; ++i)
+ if (!apply_to_pattern_1 (&XVECEXP (body, 0, i)))
+ return false;
+ return apply_to_pattern_1 (&XVECEXP (body, 0, last));
+ }
+
+ case ASM_OPERANDS:
+ for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (body); i < len; ++i)
+ if (!apply_to_rvalue_1 (&ASM_OPERANDS_INPUT (body, i)))
+ return false;
+ return true;
+
+ case CLOBBER:
+ return apply_to_lvalue_1 (XEXP (body, 0));
+
+ case SET:
+ return (apply_to_lvalue_1 (SET_DEST (body))
+ && apply_to_rvalue_1 (&SET_SRC (body)));
+
+ default:
+ /* All the other possibilities never store and can use a normal
+ rtx walk. This includes:
+
+ - USE
+ - TRAP_IF
+ - PREFETCH
+ - UNSPEC
+ - UNSPEC_VOLATILE. */
+ return apply_to_rvalue_1 (loc);
+ }
+}
+
+/* Apply this insn_propagation object's simplification or substitution
+ to the instruction pattern at LOC. */
+
+bool
+insn_propagation::apply_to_pattern (rtx *loc)
+{
+ unsigned int num_changes = num_validated_changes ();
+ bool res = apply_to_pattern_1 (loc);
+ if (!res)
+ cancel_changes (num_changes);
+ return res;
+}
+
+/* Apply this insn_propagation object's simplification or substitution
+ to the rvalue expression at LOC. */
+
+bool
+insn_propagation::apply_to_rvalue (rtx *loc)
+{
+ unsigned int num_changes = num_validated_changes ();
+ bool res = apply_to_rvalue_1 (loc);
+ if (!res)
+ cancel_changes (num_changes);
+ return res;
+}
+
+/* Check whether INSN matches a specific alternative of an .md pattern. */
+
+bool
+valid_insn_p (rtx_insn *insn)
+{
+ recog_memoized (insn);
+ if (INSN_CODE (insn) < 0)
+ return false;
+ extract_insn (insn);
+ /* We don't know whether the insn will be in code that is optimized
+ for size or speed, so consider all enabled alternatives. */
+ if (!constrain_operands (1, get_enabled_alternatives (insn)))
+ return false;
+ return true;
+}
+
/* Return 1 if OP is a valid general operand for machine mode MODE.
This is either a register reference, a memory reference,
or a constant. In the case of a memory reference, the address
@@ -1778,6 +2248,7 @@ asm_operand_ok (rtx op, const char *constraint, const char **constraints)
/* FALLTHRU */
default:
cn = lookup_constraint (constraint);
+ rtx mem = NULL;
switch (get_constraint_type (cn))
{
case CT_REGISTER:
@@ -1796,10 +2267,13 @@ asm_operand_ok (rtx op, const char *constraint, const char **constraints)
break;
case CT_MEMORY:
+ mem = op;
+ /* Fall through. */
case CT_SPECIAL_MEMORY:
/* Every memory operand can be reloaded to fit. */
- result = result || memory_operand (extract_mem_from_operand (op),
- VOIDmode);
+ if (!mem)
+ mem = extract_mem_from_operand (op);
+ result = result || memory_operand (mem, VOIDmode);
break;
case CT_ADDRESS: