author     Ulrich Weigand <ulrich.weigand@linaro.org>   2011-10-06 11:50:26 +0000
committer  Ulrich Weigand <uweigand@gcc.gnu.org>        2011-10-06 11:50:26 +0000
commit     4da6de81bcebc40d78a79d3c83c1960e6fc7d1d8
tree       89db83a084de31419946262731bbccba47d527cf /gcc
parent     fa7fd586c317945a2023d4fc8c59907a993dddf9
re PR target/50305 (Inline asm reload failure when building Linux kernel)
gcc/
	PR target/50305
	* config/arm/arm.c (arm_legitimize_reload_address): Recognize
	output of a previous pass through legitimize_reload_address.
	Do not attempt to optimize addresses if the base register is
	equivalent to a constant.

gcc/testsuite/
	PR target/50305
	* gcc.target/arm/pr50305.c: New test.
From-SVN: r179603
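
Background on the fix: an ARM word load/store takes only a signed 12-bit
(+/-4095) immediate offset, so for a larger displacement
arm_legitimize_reload_address splits the address into a high part, which
is reloaded into a fresh base register, and a small low part that still
fits the offset field. Reload can then run the hook again on that
already-split output; before this patch the hook did not recognize the
nested form it had produced itself, and reload failed (PR 50305). Below
is a minimal standalone sketch of the splitting arithmetic for the
12-bit word-access case; the helper name and the example offset are
illustrative, not taken from the patch, and narrower access modes use
smaller masks.

  #include <stdio.h>

  /* Split VAL into HIGH + LOW so that LOW fits a signed 12-bit ARM
     load/store offset field; BASE + HIGH is what reload materializes
     in a new base register.  Illustrative sketch only.  */
  static void
  split_offset (long val, long *high, long *low)
  {
    *low = ((val & 0xfff) ^ 0x800) - 0x800;  /* sign-extend low 12 bits */
    *high = val - *low;
  }

  int
  main (void)
  {
    long high, low;

    split_offset (0x1234, &high, &low);
    /* Prints: high 0x1000, low 0x234.  The reloaded address then has
       the nested shape
         (plus (plus (reg) (const_int 0x1000)) (const_int 0x234))
       which is what the patch's first hunk now recognizes on a later
       pass instead of failing.  */
    printf ("high 0x%lx, low 0x%lx\n",
            (unsigned long) high, (unsigned long) low);
    return 0;
  }

The second hunk guards against a related hazard: if the base register is
known to be equivalent to a constant, a later reload pass may
rematerialize that constant and invalidate the split, so the hook now
leaves such addresses to the generic reload code.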
Diffstat (limited to 'gcc')

-rw-r--r--  gcc/ChangeLog                            |  8
-rw-r--r--  gcc/config/arm/arm.c                     | 17
-rw-r--r--  gcc/testsuite/ChangeLog                  |  5
-rw-r--r--  gcc/testsuite/gcc.target/arm/pr50305.c   | 60

4 files changed, 90 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f8d41cc..8c54eaf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,11 @@
+2011-10-06  Ulrich Weigand  <ulrich.weigand@linaro.org>
+
+	PR target/50305
+	* config/arm/arm.c (arm_legitimize_reload_address): Recognize
+	output of a previous pass through legitimize_reload_address.
+	Do not attempt to optimize addresses if the base register is
+	equivalent to a constant.
+
 2011-10-06  Andreas Krebbel  <Andreas.Krebbel@de.ibm.com>
 
 	* function.c (thread_prologue_and_epilogue_insns): Mark
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 5161439..2feac6f 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -6550,9 +6550,26 @@ arm_legitimize_reload_address (rtx *p, int opnum, int type,
 			       int ind_levels ATTRIBUTE_UNUSED)
 {
+  /* We must recognize output that we have already generated ourselves.  */
+  if (GET_CODE (*p) == PLUS
+      && GET_CODE (XEXP (*p, 0)) == PLUS
+      && GET_CODE (XEXP (XEXP (*p, 0), 0)) == REG
+      && GET_CODE (XEXP (XEXP (*p, 0), 1)) == CONST_INT
+      && GET_CODE (XEXP (*p, 1)) == CONST_INT)
+    {
+      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
+		   MODE_BASE_REG_CLASS (mode), GET_MODE (*p),
+		   VOIDmode, 0, 0, opnum, (enum reload_type) type);
+      return true;
+    }
+
   if (GET_CODE (*p) == PLUS
       && GET_CODE (XEXP (*p, 0)) == REG
       && ARM_REGNO_OK_FOR_BASE_P (REGNO (XEXP (*p, 0)))
+      /* If the base register is equivalent to a constant, let the generic
+	 code handle it.  Otherwise we will run into problems if a future
+	 reload pass decides to rematerialize the constant.  */
+      && !reg_equiv_constant (ORIGINAL_REGNO (XEXP (*p, 0)))
       && GET_CODE (XEXP (*p, 1)) == CONST_INT)
     {
       HOST_WIDE_INT val = INTVAL (XEXP (*p, 1));
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 458bacb..27f7746 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2011-10-06  Ulrich Weigand  <ulrich.weigand@linaro.org>
+
+	PR target/50305
+	* gcc.target/arm/pr50305.c: New test.
+
 2011-10-06  Richard Guenther  <rguenther@suse.de>
 
 	PR tree-optimization/38884
diff --git a/gcc/testsuite/gcc.target/arm/pr50305.c b/gcc/testsuite/gcc.target/arm/pr50305.c
new file mode 100644
index 0000000..2f6ad5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pr50305.c
@@ -0,0 +1,60 @@
+/* { dg-do compile } */
+/* { dg-skip-if "incompatible options" { arm*-*-* } { "-march=*" } { "-march=armv7-a" } } */
+/* { dg-options "-O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3" } */
+
+struct event {
+  unsigned long long id;
+  unsigned int flag;
+};
+
+void dummy(void)
+{
+  /* This is here to ensure that the offset of perf_event_id below
+     relative to the LANCHOR symbol exceeds the allowed displacement.  */
+  static int __warned[300];
+  __warned[0] = 1;
+}
+
+extern void *kmem_cache_alloc_trace (void *cachep);
+extern void *cs_cachep;
+extern int nr_cpu_ids;
+
+struct event *
+event_alloc (int cpu)
+{
+  static unsigned long long __attribute__((aligned(8))) perf_event_id;
+  struct event *event;
+  unsigned long long result;
+  unsigned long tmp;
+
+  if (cpu >= nr_cpu_ids)
+    return 0;
+
+  event = kmem_cache_alloc_trace (cs_cachep);
+
+  __asm__ __volatile__ ("dmb" : : : "memory");
+
+  __asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (perf_event_id)
+	: "r" (&perf_event_id), "r" (1LL)
+	: "cc");
+
+  __asm__ __volatile__ ("dmb" : : : "memory");
+
+  event->id = result;
+
+  if (cpu)
+    event->flag = 1;
+
+  for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+    kmem_cache_alloc_trace (cs_cachep);
+
+  return event;
+}
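
To exercise just the new test from a built GCC tree, the usual DejaGnu
invocation should work (site-specific board settings aside); before the
patch, compiling it for ARMv7-A triggers the inline-asm reload failure
described in the PR:

  make check-gcc RUNTESTFLAGS="arm.exp=pr50305.c"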