author    Lulu Cheng <chenglulu@loongson.cn>  2023-11-17 16:04:45 +0800
committer Lulu Cheng <chenglulu@loongson.cn>  2023-11-18 16:14:48 +0800
commit    26ab97e0da5d99c23654d90428d26e22ef976309
tree      145761ae588e291974078cf423be782d738d8b4b
parent    1b12e7a07fe45570a0f846a7ad19a272062a6691
LoongArch: Implement atomic_load and atomic_store using dbar grading.
Because the LA464 memory model allows loads from the same address to be executed out of order, the load on line 23 of the following test case may be executed before the load on line 21, producing a wrong result. Therefore, when the memmodel is MEMMODEL_RELAXED, the load instruction emitted for __atomic_load is followed by "dbar 0x700".

 1  void *
 2  gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock)
 3  {
 4    int *intptr;
 5    uintptr_t oldval = 1;
 6
 7    __atomic_compare_exchange_n (ptrlock, &oldval, 2, false,
 8                                 MEMMODEL_RELAXED, MEMMODEL_RELAXED);
 9
10    /* futex works on ints, not pointers.
11       But a valid work share pointer will be at least
12       8 byte aligned, so it is safe to assume the low
13       32-bits of the pointer won't contain values 1 or 2.  */
14    __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
15  #if __BYTE_ORDER == __BIG_ENDIAN
16    if (sizeof (*ptrlock) > sizeof (int))
17      intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
18  #endif
19    do
20      do_wait (intptr, 2);
21    while (__atomic_load_n (intptr, MEMMODEL_RELAXED) == 2);
22    __asm volatile ("" : : : "memory");
23    return (void *) __atomic_load_n (ptrlock, MEMMODEL_ACQUIRE);
24  }

gcc/ChangeLog:

	* config/loongarch/sync.md (atomic_load<mode>): New template.
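For illustration, a minimal sketch of what each supported load order expands to under this patch, assuming a LoongArch target with the patch applied. The function names are hypothetical; the instruction sequences in the comments are taken from the atomic_load<mode> template in the diff below, and the actual registers are chosen by the compiler.

    #include <stdint.h>

    uint32_t
    load_relaxed (uint32_t *p)
    {
      /* Expected sequence: ld.w; dbar 0x700.  Hint 0x700 orders loads
         from the same address, closing the LA464 window described
         above.  */
      return __atomic_load_n (p, __ATOMIC_RELAXED);
    }

    uint32_t
    load_acquire (uint32_t *p)
    {
      /* Expected sequence: ld.w; dbar 0x14 (barrier after the load).  */
      return __atomic_load_n (p, __ATOMIC_ACQUIRE);
    }

    uint32_t
    load_seq_cst (uint32_t *p)
    {
      /* Expected sequence: dbar 0x11; ld.w; dbar 0x14 (barriers on
         both sides of the load).  */
      return __atomic_load_n (p, __ATOMIC_SEQ_CST);
    }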
-rw-r--r--  gcc/config/loongarch/sync.md  70
1 file changed, 65 insertions(+), 5 deletions(-)
diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
index 66e316d..67848d7 100644
--- a/gcc/config/loongarch/sync.md
+++ b/gcc/config/loongarch/sync.md
@@ -30,6 +30,7 @@
   UNSPEC_SYNC_OLD_OP
   UNSPEC_SYNC_EXCHANGE
   UNSPEC_ATOMIC_STORE
+  UNSPEC_ATOMIC_LOAD
   UNSPEC_MEMORY_BARRIER
 ])
 
@@ -103,16 +104,75 @@
 
 ;; Atomic memory operations.
 
+(define_insn "atomic_load<mode>"
+  [(set (match_operand:QHWD 0 "register_operand" "=r")
+    (unspec_volatile:QHWD
+      [(match_operand:QHWD 1 "memory_operand" "+m")
+       (match_operand:SI 2 "const_int_operand")]    ;; model
+      UNSPEC_ATOMIC_LOAD))]
+  ""
+{
+  enum memmodel model = memmodel_base (INTVAL (operands[2]));
+
+  switch (model)
+    {
+    case MEMMODEL_SEQ_CST:
+      return "dbar\t0x11\\n\\t"
+             "ld.<size>\t%0,%1\\n\\t"
+             "dbar\t0x14\\n\\t";
+    case MEMMODEL_ACQUIRE:
+      return "ld.<size>\t%0,%1\\n\\t"
+             "dbar\t0x14\\n\\t";
+    case MEMMODEL_RELAXED:
+      return "ld.<size>\t%0,%1\\n\\t"
+             "dbar\t0x700\\n\\t";
+
+    default:
+      /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
+         __ATOMIC_CONSUME and __ATOMIC_ACQUIRE.
+         The expand_builtin_atomic_store function converts all invalid memmodels
+         to MEMMODEL_SEQ_CST.
+
+         __atomic builtins doc: "Consume is implemented using the
+         stronger acquire memory order because of a deficiency in C++11's
+         semantics."  See PR 59448 and get_memmodel in builtins.cc.  */
+      gcc_unreachable ();
+    }
+}
+  [(set (attr "length") (const_int 12))])
+
 ;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
 (define_insn "atomic_store<mode>"
-  [(set (match_operand:GPR 0 "memory_operand" "+ZB")
-    (unspec_volatile:GPR
-      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
+  [(set (match_operand:QHWD 0 "memory_operand" "+m")
+    (unspec_volatile:QHWD
+      [(match_operand:QHWD 1 "reg_or_0_operand" "rJ")
       (match_operand:SI 2 "const_int_operand")]    ;; model
       UNSPEC_ATOMIC_STORE))]
   ""
-  "amswap%A2.<amo>\t$zero,%z1,%0"
-  [(set (attr "length") (const_int 8))])
+{
+  enum memmodel model = memmodel_base (INTVAL (operands[2]));
+
+  switch (model)
+    {
+    case MEMMODEL_SEQ_CST:
+      return "dbar\t0x12\\n\\t"
+             "st.<size>\t%z1,%0\\n\\t"
+             "dbar\t0x18\\n\\t";
+    case MEMMODEL_RELEASE:
+      return "dbar\t0x12\\n\\t"
+             "st.<size>\t%z1,%0\\n\\t";
+    case MEMMODEL_RELAXED:
+      return "st.<size>\t%z1,%0";
+
+    default:
+      /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
+         and __ATOMIC_RELEASE.
+         The expand_builtin_atomic_store function converts all invalid memmodels
+         to MEMMODEL_SEQ_CST.  */
+      gcc_unreachable ();
+    }
+}
+  [(set (attr "length") (const_int 12))])
 
 (define_insn "atomic_<atomic_optab><mode>"
   [(set (match_operand:GPR 0 "memory_operand" "+ZB")
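
The store side mirrors this grading. A similar sketch for the atomic_store<mode> template above, again with hypothetical function names and sequences taken from the template:

    #include <stdint.h>

    void
    store_relaxed (uint32_t *p, uint32_t v)
    {
      /* Expected sequence: st.w only; a relaxed store needs no barrier.  */
      __atomic_store_n (p, v, __ATOMIC_RELAXED);
    }

    void
    store_release (uint32_t *p, uint32_t v)
    {
      /* Expected sequence: dbar 0x12; st.w (barrier before the store).  */
      __atomic_store_n (p, v, __ATOMIC_RELEASE);
    }

    void
    store_seq_cst (uint32_t *p, uint32_t v)
    {
      /* Expected sequence: dbar 0x12; st.w; dbar 0x18 (barriers on
         both sides of the store).  */
      __atomic_store_n (p, v, __ATOMIC_SEQ_CST);
    }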