; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; Test the assembly generated for the following C function:
; int foo(_Atomic int *cp, int *old, int c) {
;  return atomic_compare_exchange_weak_explicit(cp, old, c, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
; }
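;
; On PowerPC, cmpxchg is lowered to a lwarx (load-and-reserve) /
; stwcx. (store-conditional) sequence; because this cmpxchg is weak, a
; failed stwcx. may report failure directly instead of looping back to
; retry the reservation, which is the pattern the checks below expect.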

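; The two RUN lines verify code generation for 32-bit and 64-bit AIX at -mcpu=pwr8.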
; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr8 -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefix=CHECK
; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64-ibm-aix -mcpu=pwr8 -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefix=CHECK64

define i32 @foo(ptr noundef %cp, ptr noundef %old, i32 noundef %c) {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    stw r3, -4(r1)
; CHECK-NEXT:    stw r4, -8(r1)
; CHECK-NEXT:    lwz r7, 0(r4)
; CHECK-NEXT:    stw r5, -12(r1)
; CHECK-NEXT:    stw r5, -16(r1)
; CHECK-NEXT:    lwarx r6, 0, r3
; CHECK-NEXT:    cmplw r6, r7
; CHECK-NEXT:    bne cr0, L..BB0_2
; CHECK-NEXT:  # %bb.1: # %cmpxchg.fencedstore
; CHECK-NEXT:    stwcx. r5, 0, r3
; CHECK-NEXT:    beq cr0, L..BB0_5
; CHECK-NEXT:  L..BB0_2: # %cmpxchg.failure
; CHECK-NEXT:    crxor 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
; CHECK-NEXT:  # %bb.3: # %cmpxchg.store_expected
; CHECK-NEXT:    stw r6, 0(r4)
; CHECK-NEXT:  L..BB0_4: # %cmpxchg.continue
; CHECK-NEXT:    li r3, 0
; CHECK-NEXT:    li r4, 1
; CHECK-NEXT:    isel r3, r4, r3, 4*cr5+lt
; CHECK-NEXT:    stb r3, -17(r1)
; CHECK-NEXT:    blr
; CHECK-NEXT:  L..BB0_5:
; CHECK-NEXT:    creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
; CHECK-NEXT:    b L..BB0_4
;
; CHECK64-LABEL: foo:
; CHECK64:       # %bb.0: # %entry
; CHECK64-NEXT:    std r3, -8(r1)
; CHECK64-NEXT:    std r4, -16(r1)
; CHECK64-NEXT:    lwz r7, 0(r4)
; CHECK64-NEXT:    stw r5, -20(r1)
; CHECK64-NEXT:    stw r5, -24(r1)
; CHECK64-NEXT:    lwarx r6, 0, r3
; CHECK64-NEXT:    cmplw r6, r7
; CHECK64-NEXT:    bne cr0, L..BB0_2
; CHECK64-NEXT:  # %bb.1: # %cmpxchg.fencedstore
; CHECK64-NEXT:    stwcx. r5, 0, r3
; CHECK64-NEXT:    beq cr0, L..BB0_5
; CHECK64-NEXT:  L..BB0_2: # %cmpxchg.failure
; CHECK64-NEXT:    crxor 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
; CHECK64-NEXT:  # %bb.3: # %cmpxchg.store_expected
; CHECK64-NEXT:    stw r6, 0(r4)
; CHECK64-NEXT:  L..BB0_4: # %cmpxchg.continue
; CHECK64-NEXT:    li r3, 0
; CHECK64-NEXT:    li r4, 1
; CHECK64-NEXT:    isel r3, r4, r3, 4*cr5+lt
; CHECK64-NEXT:    li r4, 1
; CHECK64-NEXT:    stb r3, -25(r1)
; CHECK64-NEXT:    li r3, 0
; CHECK64-NEXT:    isel r3, r4, r3, 4*cr5+lt
; CHECK64-NEXT:    blr
; CHECK64-NEXT:  L..BB0_5:
; CHECK64-NEXT:    creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
; CHECK64-NEXT:    b L..BB0_4
entry:
  %cp.addr = alloca ptr, align 4
  %old.addr = alloca ptr, align 4
  %c.addr = alloca i32, align 4
  %.atomictmp = alloca i32, align 4
  %cmpxchg.bool = alloca i8, align 1
  store ptr %cp, ptr %cp.addr, align 4
  store ptr %old, ptr %old.addr, align 4
  store i32 %c, ptr %c.addr, align 4
  %0 = load ptr, ptr %cp.addr, align 4
  %1 = load ptr, ptr %old.addr, align 4
  %2 = load i32, ptr %c.addr, align 4
  store i32 %2, ptr %.atomictmp, align 4
  %3 = load i32, ptr %1, align 4
  %4 = load i32, ptr %.atomictmp, align 4
  %5 = cmpxchg weak ptr %0, i32 %3, i32 %4 monotonic monotonic, align 4
  %6 = extractvalue { i32, i1 } %5, 0
  %7 = extractvalue { i32, i1 } %5, 1
  br i1 %7, label %cmpxchg.continue, label %cmpxchg.store_expected

cmpxchg.store_expected:                           ; preds = %entry
  store i32 %6, ptr %1, align 4
  br label %cmpxchg.continue

cmpxchg.continue:                                 ; preds = %cmpxchg.store_expected, %entry
  %storedv = zext i1 %7 to i8
  store i8 %storedv, ptr %cmpxchg.bool, align 1
  %8 = load i8, ptr %cmpxchg.bool, align 1
  %loadedv = trunc i8 %8 to i1
  %conv = zext i1 %loadedv to i32
  ret i32 %conv
}