; llvm/test/CodeGen/AMDGPU/gep-const-address-space.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck %s
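;
; This test takes a GEP on a global (addrspace(1)) pointer with a signed,
; possibly negative index, addrspacecasts the result to a flat pointer, and
; performs an agent-scope seq_cst atomicrmw fadd through it. As the checks
; below show, the backend expands the flat atomic into runtime address-space
; checks against src_shared_base and src_private_base, selecting between
; ds_add_f64 (shared), global_atomic_add_f64 (global), and a scalar buffer
; load / v_add_f64 / buffer store sequence (private).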

define protected amdgpu_kernel void @IllegalGEPConst(i32 %a, ptr addrspace(1) %b, double %c) {
; CHECK-LABEL: IllegalGEPConst:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; CHECK-NEXT:    s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24
; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; CHECK-NEXT:    s_mov_b32 s14, -1
; CHECK-NEXT:    s_mov_b32 s15, 0xe00000
; CHECK-NEXT:    s_add_u32 s12, s12, s11
; CHECK-NEXT:    s_addc_u32 s13, s13, 0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_ashr_i32 s7, s6, 31
; CHECK-NEXT:    s_lshl_b64 s[6:7], s[6:7], 3
; CHECK-NEXT:    s_add_u32 s0, s0, s6
; CHECK-NEXT:    s_addc_u32 s1, s1, s7
; CHECK-NEXT:    s_add_u32 s0, s0, -8
; CHECK-NEXT:    s_mov_b64 s[4:5], src_shared_base
; CHECK-NEXT:    s_addc_u32 s1, s1, -1
; CHECK-NEXT:    s_cmp_eq_u32 s1, s5
; CHECK-NEXT:    s_cselect_b64 s[4:5], -1, 0
; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
; CHECK-NEXT:    s_mov_b64 s[4:5], -1
; CHECK-NEXT:    s_cbranch_vccnz .LBB0_3
; CHECK-NEXT:  ; %bb.1: ; %Flow6
; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
; CHECK-NEXT:    s_cbranch_vccz .LBB0_8
; CHECK-NEXT:  .LBB0_2: ; %atomicrmw.phi
; CHECK-NEXT:    s_endpgm
; CHECK-NEXT:  .LBB0_3: ; %atomicrmw.check.private
; CHECK-NEXT:    s_mov_b64 s[4:5], src_private_base
; CHECK-NEXT:    s_cmp_eq_u32 s1, s5
; CHECK-NEXT:    s_cselect_b64 s[4:5], -1, 0
; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
; CHECK-NEXT:    s_mov_b64 s[4:5], -1
; CHECK-NEXT:    s_cbranch_vccz .LBB0_5
; CHECK-NEXT:  ; %bb.4: ; %atomicrmw.global
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; CHECK-NEXT:    global_atomic_add_f64 v2, v[0:1], s[0:1]
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:    s_mov_b64 s[4:5], 0
; CHECK-NEXT:  .LBB0_5: ; %Flow
; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
; CHECK-NEXT:    s_cbranch_vccnz .LBB0_7
; CHECK-NEXT:  ; %bb.6: ; %atomicrmw.private
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cselect_b32 s4, s0, -1
; CHECK-NEXT:    v_mov_b32_e32 v2, s4
; CHECK-NEXT:    buffer_load_dword v0, v2, s[12:15], 0 offen
; CHECK-NEXT:    buffer_load_dword v1, v2, s[12:15], 0 offen offset:4
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[2:3]
; CHECK-NEXT:    buffer_store_dword v0, v2, s[12:15], 0 offen
; CHECK-NEXT:    buffer_store_dword v1, v2, s[12:15], 0 offen offset:4
; CHECK-NEXT:  .LBB0_7: ; %Flow5
; CHECK-NEXT:    s_cbranch_execnz .LBB0_2
; CHECK-NEXT:  .LBB0_8: ; %atomicrmw.shared
; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT:    s_cselect_b32 s0, s0, -1
; CHECK-NEXT:    v_mov_b32_e32 v2, s0
; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; CHECK-NEXT:    ds_add_f64 v2, v[0:1]
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_endpgm
entry:
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
  %i.5 = atomicrmw fadd ptr %i.4, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
  ret void
}

!0 = !{}
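
; Note: the !amdgpu.no.fine.grained.memory annotation on the atomicrmw is what
; permits lowering to the native FP atomics seen above; without it the
; expansion would presumably have to fall back to a CAS loop to stay safe for
; fine-grained remote memory.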