| author | Richard Henderson <rth@redhat.com> | 2012-04-30 09:51:15 -0700 |
|---|---|---|
| committer | Richard Henderson <rth@gcc.gnu.org> | 2012-04-30 09:51:15 -0700 |
| commit | a7b8107f8ee99629c28ab9e0e5b00c589d8577db (patch) | |
| tree | eec45707de7026b461de08386e7b628ee962695e /gcc | |
| parent | 85dd5559489b154582d81f668fc1a3af63ab4764 (diff) | |
arm.md (UNSPEC_LL): New.
* config/arm/arm.md (UNSPEC_LL): New.
* config/arm/sync.md (atomic_loaddi, atomic_loaddi_1): New.
(arm_load_exclusivedi): Use %H0.
From-SVN: r186990
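
In practical terms, the new `atomic_loaddi` expander lets GCC expand a 64-bit atomic load inline on ARM targets that provide LDREXD and doubleword alignment. Below is a minimal C sketch of the kind of code that would exercise it, using GCC's generic `__atomic` builtins; the function and variable names are illustrative only and not part of the patch.

```c
#include <stdint.h>

/* Illustrative only: with the new atomic_loaddi pattern, a 64-bit
   atomic load on an LDREXD-capable, doubleword-aligned ARM target can
   be expanded inline (a single ldrexd plus the fences required by the
   memory model) instead of falling back to a compare-and-swap sequence
   or a library call.  */
uint64_t
load_counter (const uint64_t *p)
{
  return __atomic_load_n (p, __ATOMIC_SEQ_CST);
}
```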
Diffstat (limited to 'gcc')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | gcc/ChangeLog | 6 |
| -rw-r--r-- | gcc/config/arm/arm.md | 1 |
| -rw-r--r-- | gcc/config/arm/sync.md | 36 |
3 files changed, 33 insertions, 10 deletions
```diff
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5a39f21..ea032c1 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2012-04-30  Richard Henderson  <rth@redhat.com>
+
+	* config/arm/arm.md (UNSPEC_LL): New.
+	* config/arm/sync.md (atomic_loaddi, atomic_loaddi_1): New.
+	(arm_load_exclusivedi): Use %H0.
+
 2012-04-30  Jason Merrill  <jason@redhat.com>
 
 	* dwarf2out.c (comdat_symbol_id): Add const.
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 9506228..7a49270 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -117,6 +117,7 @@
                          ; that.
   UNSPEC_UNALIGNED_STORE ; Same for str/strh.
   UNSPEC_PIC_UNIFIED     ; Create a common pic addressing form.
+  UNSPEC_LL              ; Represent an unpaired load-register-exclusive.
 ])
 
 ;; UNSPEC_VOLATILE Usage:
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 03838f5..86135bb 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -65,6 +65,31 @@
    (set_attr "conds" "unconditional")
    (set_attr "predicable" "no")])
 
+;; Note that ldrd and vldr are *not* guaranteed to be single-copy atomic,
+;; even for a 64-bit aligned address.  Instead we use a ldrexd unparied
+;; with a store.
+(define_expand "atomic_loaddi"
+  [(match_operand:DI 0 "s_register_operand")	;; val out
+   (match_operand:DI 1 "mem_noofs_operand")	;; memory
+   (match_operand:SI 2 "const_int_operand")]	;; model
+  "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
+{
+  enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+  expand_mem_thread_fence (model);
+  emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
+  if (model == MEMMODEL_SEQ_CST)
+    expand_mem_thread_fence (model);
+  DONE;
+})
+
+(define_insn "atomic_loaddi_1"
+  [(set (match_operand:DI 0 "s_register_operand" "=r")
+	(unspec:DI [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
+		   UNSPEC_LL))]
+  "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
+  "ldrexd%?\t%0, %H0, %C1"
+  [(set_attr "predicable" "yes")])
+
 (define_expand "atomic_compare_and_swap<mode>"
   [(match_operand:SI 0 "s_register_operand" "")		;; bool out
    (match_operand:QHSD 1 "s_register_operand" "")	;; val out
@@ -317,16 +342,7 @@
 	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
 	  VUNSPEC_LL))]
   "TARGET_HAVE_LDREXD"
-  {
-    rtx target = operands[0];
-    /* The restrictions on target registers in ARM mode are that the two
-       registers are consecutive and the first one is even; Thumb is
-       actually more flexible, but DI should give us this anyway.
-       Note that the 1st register always gets the lowest word in memory.  */
-    gcc_assert ((REGNO (target) & 1) == 0);
-    operands[2] = gen_rtx_REG (SImode, REGNO (target) + 1);
-    return "ldrexd%?\t%0, %2, %C1";
-  }
+  "ldrexd%?\t%0, %H0, %C1"
   [(set_attr "predicable" "yes")])
 
 (define_insn "arm_store_exclusive<mode>"
```
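
Two details of the new template are worth spelling out. First, `%H0` replaces the hand-built `operands[2]`: for a DImode register operand, `%0` prints the first register of the pair and `%H0` the following one, so `ldrexd%?\t%0, %H0, %C1` names both halves without the explicit REGNO arithmetic the old code needed. Second, as the comment in the patch notes, the LDREXD is deliberately left unpaired with any STREXD, because its doubleword read is architecturally single-copy atomic on its own. Below is a rough inline-assembly sketch of what `atomic_loaddi_1` emits, for illustration only; the function name and constraint choices are this sketch's assumptions, not part of the patch.

```c
#include <stdint.h>

/* Illustrative sketch: mirrors the "ldrexd %0, %H0, [mem]" sequence the
   new atomic_loaddi_1 pattern prints.  VAL is 64-bit, so GCC allocates a
   register pair; %0 is the first register and %H0 the second.  The
   exclusive reservation made by ldrexd is simply never written back.  */
static inline uint64_t
atomic_load_u64_relaxed (const uint64_t *p)
{
  uint64_t val;
  __asm__ volatile ("ldrexd\t%0, %H0, [%2]"
                    : "=&r" (val)
                    : "Q" (*p), "r" (p));
  return val;
}
```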