; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck %s

; iglp.opt should not be flagged as clobbering the memory operand of the global_load, so we should be able to
; lower into the scalar version (i.e. we should not need to lower into the vector version with a waterfall loop).

define amdgpu_kernel void @func(ptr addrspace(1) %in, ptr addrspace(3) %out) {
; CHECK-LABEL: func:
; CHECK:       ; %bb.0: ; %.lr.ph
; CHECK-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x0
; CHECK-NEXT:    s_mov_b64 s[8:9], 0
; CHECK-NEXT:    s_mov_b64 s[10:11], 0
; CHECK-NEXT:    s_mov_b32 s3, 32
; CHECK-NEXT:    s_mov_b32 s2, 0
; CHECK-NEXT:    s_mov_b64 s[12:13], 0
; CHECK-NEXT:  .LBB0_1: ; %loop
; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    s_mov_b64 s[0:1], s[10:11]
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_add_u32 s10, s6, s12
; CHECK-NEXT:    s_addc_u32 s11, s7, s13
; CHECK-NEXT:    s_load_dwordx2 s[12:13], s[8:9], 0x0
; CHECK-NEXT:    s_add_i32 s3, s3, -1
; CHECK-NEXT:    s_cmp_lg_u32 s3, 0
; CHECK-NEXT:    ; iglp_opt mask(0x00000000)
; CHECK-NEXT:    s_cbranch_scc1 .LBB0_1
; CHECK-NEXT:  ; %bb.2: ; %end
; CHECK-NEXT:    s_and_b32 s1, s1, 0xffff
; CHECK-NEXT:    s_mov_b32 s3, s2
; CHECK-NEXT:    buffer_load_dwordx2 v[0:1], off, s[0:3], 0
; CHECK-NEXT:    s_load_dword s0, s[4:5], 0x8
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v1, 0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v2, s0
; CHECK-NEXT:    v_and_b32_e32 v0, 0xff, v0
; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
; CHECK-NEXT:    s_endpgm
.lr.ph:
  br label %loop

loop:                                                ; preds = %loop, %.lr.ph
  %addr = phi ptr addrspace(1) [ null, %.lr.ph ], [ %gep, %loop ]
  %offset = phi i64 [ 0, %.lr.ph ], [ %nextOff, %loop ]
  %inc = phi i32 [0, %.lr.ph], [ %incCond, %loop ]
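  ; Build a buffer resource from the loop-carried pointer and load two dwords through it.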
  %rsrc = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %addr, i16 0, i32 0, i32 0)
  %load = tail call <2 x i32> @llvm.amdgcn.raw.ptr.buffer.load.v2i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %load.bc = bitcast <2 x i32> %load to <8 x i8>
  %load.elem = extractelement <8 x i8> %load.bc, i64 0
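  ; Scheduling hint; per the comment above, it must not be treated as clobbering the memory read by the buffer load.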
  tail call void @llvm.amdgcn.iglp.opt(i32 0)
  %vec = insertelement <4 x i8> zeroinitializer, i8 %load.elem, i64 0
  %vec.bc = bitcast <4 x i8> %vec to <2 x half>
  %shuff = shufflevector <2 x half> %vec.bc, <2 x half> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %gep = getelementptr i8, ptr addrspace(1) %in, i64 %offset
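  ; Uniform load (from a constant null pointer) that yields the offset used to advance the loop-carried pointer.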
  %unmaskedload49 = load <1 x i64>, ptr addrspace(1) null, align 8
  %nextOff = extractelement <1 x i64> %unmaskedload49, i64 0
  %incCond = add i32 %inc, 1
  %cond = icmp eq i32 %incCond, 32
  br i1 %cond, label %end, label %loop

end:
  store <4 x half> %shuff, ptr addrspace(3) %out, align 8
  ret void
}

declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone, i16, i32, i32)

declare <2 x i32> @llvm.amdgcn.raw.ptr.buffer.load.v2i32(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg)

declare void @llvm.amdgcn.iglp.opt(i32 immarg)