aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Sandiford <richard.sandiford@arm.com>2021-03-30 11:42:50 +0100
committerRichard Sandiford <richard.sandiford@arm.com>2021-03-30 11:42:50 +0100
commit48c79f054bf435051c95ee093c45a0f8c9de5b4e (patch)
tree692221b7fef075bfaa38c430aab6a7eee79a35af
parentcc2fda1328ee69b92724d6b3cffb741f07d86047 (diff)
downloadgcc-48c79f054bf435051c95ee093c45a0f8c9de5b4e.zip
gcc-48c79f054bf435051c95ee093c45a0f8c9de5b4e.tar.gz
gcc-48c79f054bf435051c95ee093c45a0f8c9de5b4e.tar.bz2
aarch64: Tweak post-RA handling of CONST_INT moves [PR98136]
This PR is a regression caused by r8-5967, where we replaced a call to aarch64_internal_mov_immediate in aarch64_add_offset with a call to aarch64_force_temporary, which in turn uses the normal emit_move_insn{,_1} routines. The problem is that aarch64_add_offset can be called while outputting a thunk, where we require all instructions to be valid without splitting. However, the move expanders were not splitting CONST_INT moves themselves. I think the right fix is to make the move expanders work even in this scenario, rather than require callers to handle it as a special case. gcc/ PR target/98136 * config/aarch64/aarch64.md (mov<mode>): Pass multi-instruction CONST_INTs to aarch64_expand_mov_immediate when called after RA. gcc/testsuite/ PR target/98136 * g++.dg/pr98136.C: New test.
-rw-r--r--gcc/config/aarch64/aarch64.md17
-rw-r--r--gcc/testsuite/g++.dg/pr98136.C26
2 files changed, 39 insertions, 4 deletions
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index dd1dc2b..a398c3d 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1241,10 +1241,19 @@
if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
operands[1] = force_reg (<MODE>mode, operands[1]);
- /* FIXME: RR we still need to fix up what we are doing with
- symbol_refs and other types of constants. */
- if (CONSTANT_P (operands[1])
- && !CONST_INT_P (operands[1]))
+ /* Lower moves of symbolic constants into individual instructions.
+ Doing this now is sometimes necessary for correctness, since some
+ sequences require temporary pseudo registers. Lowering now is also
+ often better for optimization, since more RTL passes get the
+ chance to optimize the individual instructions.
+
+ When called after RA, also split multi-instruction moves into
+      smaller pieces now, since we can't be sure that there
+ will be a following split pass. */
+ if (CONST_INT_P (operands[1])
+ ? (reload_completed
+ && !aarch64_mov_imm_operand (operands[1], <MODE>mode))
+ : CONSTANT_P (operands[1]))
{
aarch64_expand_mov_immediate (operands[0], operands[1]);
DONE;
diff --git a/gcc/testsuite/g++.dg/pr98136.C b/gcc/testsuite/g++.dg/pr98136.C
new file mode 100644
index 0000000..f3c27f6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/pr98136.C
@@ -0,0 +1,26 @@
+// { dg-do compile { target { ilp32 || lp64 } } }
+
+struct AddIn
+{
+ virtual ~AddIn() {}
+ virtual void AddInCall()=0;
+};
+
+struct Base
+{
+ char b[32*1024*1024]; // Anything bigger than 16mb causes internal compiler error
+ virtual ~Base() {}
+};
+
+struct Deriv : public Base,
+ public AddIn
+{
+ void AddInCall() {}
+};
+
+int main (int argc, char **argv)
+{
+ Deriv deriv;
+ deriv.AddInCall();
+ return 0;
+}