Diffstat (limited to 'llvm/test/CodeGen/SPARC')
-rw-r--r-- | llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll |  24
-rw-r--r-- | llvm/test/CodeGen/SPARC/atomics-ordering.ll         | 446
2 files changed, 458 insertions, 12 deletions
diff --git a/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
index 380a4a0..d1f1c46 100644
--- a/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/SPARC/atomicrmw-uinc-udec-wrap.ll
@@ -5,7 +5,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: and %o0, -4, %o2
; CHECK-NEXT: mov 3, %o3
; CHECK-NEXT: andn %o3, %o0, %o0
@@ -36,7 +36,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
; CHECK-NEXT: srl %o4, %o0, %o0
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
  %result = atomicrmw uinc_wrap ptr %ptr, i8 %val seq_cst
@@ -47,7 +47,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: and %o0, -4, %o2
; CHECK-NEXT: and %o0, 3, %o0
; CHECK-NEXT: xor %o0, 2, %o0
@@ -79,7 +79,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
; CHECK-NEXT: srl %o5, %o0, %o0
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
  %result = atomicrmw uinc_wrap ptr %ptr, i16 %val seq_cst
@@ -90,7 +90,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: ld [%o0], %o2
; CHECK-NEXT: .LBB2_1: ! %atomicrmw.start
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
@@ -106,7 +106,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: bne %icc, .LBB2_1
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: mov %o2, %o0
  %result = atomicrmw uinc_wrap ptr %ptr, i32 %val seq_cst
@@ -160,7 +160,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: and %o0, -4, %o2
; CHECK-NEXT: mov 3, %o3
; CHECK-NEXT: andn %o3, %o0, %o0
@@ -193,7 +193,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
; CHECK-NEXT: srl %o5, %o0, %o0
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
  %result = atomicrmw udec_wrap ptr %ptr, i8 %val seq_cst
@@ -204,7 +204,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: and %o0, -4, %o2
; CHECK-NEXT: and %o0, 3, %o0
; CHECK-NEXT: xor %o0, 2, %o0
@@ -238,7 +238,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
; CHECK-NEXT: srl %g2, %o0, %o0
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: nop
  %result = atomicrmw udec_wrap ptr %ptr, i16 %val seq_cst
@@ -249,7 +249,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0:
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadStore | #StoreStore
; CHECK-NEXT: ld [%o0], %o2
; CHECK-NEXT: .LBB6_1: ! %atomicrmw.start
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
@@ -267,7 +267,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-NEXT: bne %icc, .LBB6_1
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.2: ! %atomicrmw.end
-; CHECK-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; CHECK-NEXT: membar #LoadLoad | #LoadStore
; CHECK-NEXT: retl
; CHECK-NEXT: mov %o2, %o0
  %result = atomicrmw udec_wrap ptr %ptr, i32 %val seq_cst
diff --git a/llvm/test/CodeGen/SPARC/atomics-ordering.ll b/llvm/test/CodeGen/SPARC/atomics-ordering.ll
new file mode 100644
index 0000000..7c13ac2
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/atomics-ordering.ll
@@ -0,0 +1,446 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=sparc -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC32
+; RUN: llc < %s -mtriple=sparc -mcpu=leon4 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC32-LEON4
+; RUN: llc < %s -mtriple=sparc -mcpu=v9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC32-V9
+; RUN: llc < %s -mtriple=sparcv9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC64
+
+define i32 @load_acq(ptr %0) nounwind {
+; SPARC32-LABEL: load_acq:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_load_4
+; SPARC32-NEXT: mov 2, %o1
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: load_acq:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: ld [%o0], %o0
+;
+; SPARC32-V9-LABEL: load_acq:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: ld [%o0], %o0
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: nop
+;
+; SPARC64-LABEL: load_acq:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: ld [%o0], %o0
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: nop
+  %2 = load atomic i32, ptr %0 acquire, align 4
+  ret i32 %2
+}
+
+define i32 @load_sc(ptr %0) nounwind {
+; SPARC32-LABEL: load_sc:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_load_4
+; SPARC32-NEXT: mov 5, %o1
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: load_sc:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: ld [%o0], %o0
+;
+; SPARC32-V9-LABEL: load_sc:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: ld [%o0], %o0
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: nop
+;
+; SPARC64-LABEL: load_sc:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: ld [%o0], %o0
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: nop
+  %2 = load atomic i32, ptr %0 seq_cst, align 4
+  ret i32 %2
+}
+
+define void @store_rel(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: store_rel:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_store_4
+; SPARC32-NEXT: mov 3, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: store_rel:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: st %o1, [%o0]
+;
+; SPARC32-V9-LABEL: store_rel:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: st %o1, [%o0]
+;
+; SPARC64-LABEL: store_rel:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: st %o1, [%o0]
+  store atomic i32 %1, ptr %0 release, align 4
+  ret void
+}
+
+define void @store_sc(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: store_sc:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_store_4
+; SPARC32-NEXT: mov 5, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: store_sc:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: st %o1, [%o0]
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: ldstub [%sp+-1], %g0
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: nop
+;
+; SPARC32-V9-LABEL: store_sc:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: st %o1, [%o0]
+; SPARC32-V9-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: nop
+;
+; SPARC64-LABEL: store_sc:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: st %o1, [%o0]
+; SPARC64-NEXT: membar #LoadLoad | #StoreLoad | #LoadStore | #StoreStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: nop
+  store atomic i32 %1, ptr %0 seq_cst, align 4
+  ret void
+}
+
+define i32 @rmw_acq(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: rmw_acq:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_exchange_4
+; SPARC32-NEXT: mov 2, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: rmw_acq:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: swap [%o0], %o1
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o1, %o0
+;
+; SPARC32-V9-LABEL: rmw_acq:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: swap [%o0], %o1
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o1, %o0
+;
+; SPARC64-LABEL: rmw_acq:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: swap [%o0], %o1
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o1, %o0
+  %3 = atomicrmw xchg ptr %0, i32 %1 acquire, align 4
+  ret i32 %3
+}
+
+define i32 @rmw_rel(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: rmw_rel:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_exchange_4
+; SPARC32-NEXT: mov 3, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: rmw_rel:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: swap [%o0], %o1
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o1, %o0
+;
+; SPARC32-V9-LABEL: rmw_rel:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: swap [%o0], %o1
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o1, %o0
+;
+; SPARC64-LABEL: rmw_rel:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: swap [%o0], %o1
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o1, %o0
+  %3 = atomicrmw xchg ptr %0, i32 %1 release, align 4
+  ret i32 %3
+}
+
+define i32 @rmw_acq_rel(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: rmw_acq_rel:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_exchange_4
+; SPARC32-NEXT: mov 4, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: rmw_acq_rel:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: swap [%o0], %o1
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o1, %o0
+;
+; SPARC32-V9-LABEL: rmw_acq_rel:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: swap [%o0], %o1
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o1, %o0
+;
+; SPARC64-LABEL: rmw_acq_rel:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: swap [%o0], %o1
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o1, %o0
+  %3 = atomicrmw xchg ptr %0, i32 %1 acq_rel, align 4
+  ret i32 %3
+}
+
+define i32 @rmw_sc(ptr %0, i32 %1) nounwind {
+; SPARC32-LABEL: rmw_sc:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i1, %o1
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: call __atomic_exchange_4
+; SPARC32-NEXT: mov 5, %o2
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore %g0, %o0, %o0
+;
+; SPARC32-LEON4-LABEL: rmw_sc:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: swap [%o0], %o1
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o1, %o0
+;
+; SPARC32-V9-LABEL: rmw_sc:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: swap [%o0], %o1
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o1, %o0
+;
+; SPARC64-LABEL: rmw_sc:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: swap [%o0], %o1
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o1, %o0
+  %3 = atomicrmw xchg ptr %0, i32 %1 seq_cst, align 4
+  ret i32 %3
+}
+
+define i32 @cas_acq(ptr %0, i32 %1, i32 %2) nounwind {
+; SPARC32-LABEL: cas_acq:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i2, %o2
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: st %i1, [%fp+-4]
+; SPARC32-NEXT: add %fp, -4, %o1
+; SPARC32-NEXT: mov 2, %o3
+; SPARC32-NEXT: call __atomic_compare_exchange_4
+; SPARC32-NEXT: mov %o3, %o4
+; SPARC32-NEXT: ld [%fp+-4], %i0
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: cas_acq:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: casa [%o0] 10, %o1, %o2
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o2, %o0
+;
+; SPARC32-V9-LABEL: cas_acq:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: cas [%o0], %o1, %o2
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o2, %o0
+;
+; SPARC64-LABEL: cas_acq:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: cas [%o0], %o1, %o2
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o2, %o0
+  %4 = cmpxchg ptr %0, i32 %1, i32 %2 acquire acquire, align 4
+  %5 = extractvalue { i32, i1 } %4, 0
+  ret i32 %5
+}
+
+define i32 @cas_rel(ptr %0, i32 %1, i32 %2) nounwind {
+; SPARC32-LABEL: cas_rel:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i2, %o2
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: st %i1, [%fp+-4]
+; SPARC32-NEXT: add %fp, -4, %o1
+; SPARC32-NEXT: mov 3, %o3
+; SPARC32-NEXT: call __atomic_compare_exchange_4
+; SPARC32-NEXT: mov %g0, %o4
+; SPARC32-NEXT: ld [%fp+-4], %i0
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: cas_rel:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: casa [%o0] 10, %o1, %o2
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o2, %o0
+;
+; SPARC32-V9-LABEL: cas_rel:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: cas [%o0], %o1, %o2
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o2, %o0
+;
+; SPARC64-LABEL: cas_rel:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: cas [%o0], %o1, %o2
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o2, %o0
+  %4 = cmpxchg ptr %0, i32 %1, i32 %2 release monotonic, align 4
+  %5 = extractvalue { i32, i1 } %4, 0
+  ret i32 %5
+}
+
+define i32 @cas_acq_rel(ptr %0, i32 %1, i32 %2) nounwind {
+; SPARC32-LABEL: cas_acq_rel:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i2, %o2
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: st %i1, [%fp+-4]
+; SPARC32-NEXT: add %fp, -4, %o1
+; SPARC32-NEXT: mov 4, %o3
+; SPARC32-NEXT: call __atomic_compare_exchange_4
+; SPARC32-NEXT: mov 2, %o4
+; SPARC32-NEXT: ld [%fp+-4], %i0
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: cas_acq_rel:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: casa [%o0] 10, %o1, %o2
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o2, %o0
+;
+; SPARC32-V9-LABEL: cas_acq_rel:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: cas [%o0], %o1, %o2
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o2, %o0
+;
+; SPARC64-LABEL: cas_acq_rel:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: cas [%o0], %o1, %o2
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o2, %o0
+  %4 = cmpxchg ptr %0, i32 %1, i32 %2 acq_rel acquire, align 4
+  %5 = extractvalue { i32, i1 } %4, 0
+  ret i32 %5
+}
+
+define i32 @cas_sc(ptr %0, i32 %1, i32 %2) nounwind {
+; SPARC32-LABEL: cas_sc:
+; SPARC32: ! %bb.0:
+; SPARC32-NEXT: save %sp, -96, %sp
+; SPARC32-NEXT: mov %i2, %o2
+; SPARC32-NEXT: mov %i0, %o0
+; SPARC32-NEXT: st %i1, [%fp+-4]
+; SPARC32-NEXT: add %fp, -4, %o1
+; SPARC32-NEXT: mov 5, %o3
+; SPARC32-NEXT: call __atomic_compare_exchange_4
+; SPARC32-NEXT: mov %o3, %o4
+; SPARC32-NEXT: ld [%fp+-4], %i0
+; SPARC32-NEXT: ret
+; SPARC32-NEXT: restore
+;
+; SPARC32-LEON4-LABEL: cas_sc:
+; SPARC32-LEON4: ! %bb.0:
+; SPARC32-LEON4-NEXT: stbar
+; SPARC32-LEON4-NEXT: casa [%o0] 10, %o1, %o2
+; SPARC32-LEON4-NEXT: retl
+; SPARC32-LEON4-NEXT: mov %o2, %o0
+;
+; SPARC32-V9-LABEL: cas_sc:
+; SPARC32-V9: ! %bb.0:
+; SPARC32-V9-NEXT: membar #LoadStore | #StoreStore
+; SPARC32-V9-NEXT: cas [%o0], %o1, %o2
+; SPARC32-V9-NEXT: membar #LoadLoad | #LoadStore
+; SPARC32-V9-NEXT: retl
+; SPARC32-V9-NEXT: mov %o2, %o0
+;
+; SPARC64-LABEL: cas_sc:
+; SPARC64: ! %bb.0:
+; SPARC64-NEXT: membar #LoadStore | #StoreStore
+; SPARC64-NEXT: cas [%o0], %o1, %o2
+; SPARC64-NEXT: membar #LoadLoad | #LoadStore
+; SPARC64-NEXT: retl
+; SPARC64-NEXT: mov %o2, %o0
+  %4 = cmpxchg ptr %0, i32 %1, i32 %2 seq_cst seq_cst, align 4
+  %5 = extractvalue { i32, i1 } %4, 0
+  ret i32 %5
+}
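To reproduce any of these checks locally, a minimal sketch follows; the local file name reproduce.ll is illustrative, while the IR body, the llc invocation, and the expected SPARC64 lowering are taken verbatim from the load_acq test and RUN lines above.

; reproduce.ll -- minimal sketch, mirroring load_acq from atomics-ordering.ll above.
define i32 @load_acq(ptr %p) nounwind {
  %v = load atomic i32, ptr %p acquire, align 4
  ret i32 %v
}

; Run one of the test's RUN lines by hand, e.g.:
;   llc < reproduce.ll -mtriple=sparcv9 -verify-machineinstrs
; Per the SPARC64 checks above, the acquire load lowers to a plain "ld"
; followed by "membar #LoadLoad | #LoadStore". The CHECK lines themselves
; are regenerated with utils/update_llc_test_checks.py, as noted in the
; file header.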