aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorRamana Radhakrishnan <ramana.radhakrishnan@arm.com>2016-05-13 09:23:28 +0000
committerRamana Radhakrishnan <ramana@gcc.gnu.org>2016-05-13 09:23:28 +0000
commit43203dea1afa87140a0f045f2c2182943247065f (patch)
treeec00ce3d8d49a2c80e8e91fcf56ef29defe59ba8 /gcc
parent44cb09ea6b2164e8392e3001b0ba5b93093bfa07 (diff)
downloadgcc-43203dea1afa87140a0f045f2c2182943247065f.zip
gcc-43203dea1afa87140a0f045f2c2182943247065f.tar.gz
gcc-43203dea1afa87140a0f045f2c2182943247065f.tar.bz2
Set TARGET_OMIT_STRUCT_RETURN_REG to true
The reason this caught my eye on aarch64 is because the return value register (x0) is not identical to the register in which the hidden parameter for AArch64 is set (x8). Thus setting this to true seems to be quite reasonable and shaves off 100 odd mov x0, x8's from cc1 in a bootstrap build. I don't expect this to make a huge impact on performance but as they say every little counts. The AAPCS64 is quite explicit about not requiring that the contents of x8 be kept live. Bootstrapped and regression tested on aarch64. Ok to apply ? Ramana gcc/ * config/aarch64/aarch64.c (TARGET_OMIT_STRUCT_RETURN_REG): Set to true. gcc/testsuite * gcc.target/aarch64/struct_return.c: New test. From-SVN: r236197
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog5
-rw-r--r--gcc/config/aarch64/aarch64.c3
-rw-r--r--gcc/testsuite/ChangeLog4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/struct_return.c31
4 files changed, 43 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 93fa743..87f5eaf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2016-05-13 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * config/aarch64/aarch64.c (TARGET_OMIT_STRUCT_RETURN_REG): Set
+ to true.
+
2016-05-13 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
PR target/71080
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 9995494..7e0e3b9 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -14232,6 +14232,9 @@ aarch64_optab_supported_p (int op, machine_mode, machine_mode,
#undef TARGET_OPTAB_SUPPORTED_P
#define TARGET_OPTAB_SUPPORTED_P aarch64_optab_supported_p
+#undef TARGET_OMIT_STRUCT_RETURN_REG
+#define TARGET_OMIT_STRUCT_RETURN_REG true
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index fd0702b..333bec6 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2016-05-13 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * gcc.target/aarch64/struct_return.c: New test.
+
2016-05-12 Marek Polacek <polacek@redhat.com>
PR c/70756
diff --git a/gcc/testsuite/gcc.target/aarch64/struct_return.c b/gcc/testsuite/gcc.target/aarch64/struct_return.c
new file mode 100644
index 0000000..6d90b7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/struct_return.c
@@ -0,0 +1,31 @@
+/* Test the absence of a spurious move from x8 to x0 for functions
+ returning structures. */
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+struct s
+{
+ long x;
+ long y;
+ long z;
+};
+
+struct s __attribute__((noinline))
+foo (long a, long d, long c)
+{
+ struct s b;
+ b.x = a;
+ b.y = d;
+ b.z = c;
+ return b;
+}
+
+int
+main (void)
+{
+ struct s x;
+ x = foo ( 10, 20, 30);
+ return x.x + x.y + x.z;
+}
+
+/* { dg-final { scan-assembler-not "mov\tx0, x8" } } */