; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GCN,VI %s
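; In each function below both pointers are read and then written, so every
; store aliases an earlier load. The scheduler must not move a store ahead
; of a load it may conflict with, hence the strict ordering in the checks.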

; GCN-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_v2f64_global_load_store(ptr addrspace(1) nocapture %x, ptr addrspace(1) nocapture %y) nounwind {
  %tmp1 = load <2 x double>, ptr addrspace(1) %x, align 16
  %tmp4 = load <2 x double>, ptr addrspace(1) %y, align 16
  store <2 x double> %tmp4, ptr addrspace(1) %x, align 16
  store <2 x double> %tmp1, ptr addrspace(1) %y, align 16
  ret void
}

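; Same aliasing constraint, but through LDS. The checks reflect that SI
; splits the 16-byte local access into read2/write2 pairs of b64 operations,
; while VI uses single 128-bit LDS operations.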
; GCN-LABEL: {{^}}no_reorder_scalarized_v2f64_local_load_store:
; SI: ds_read2_b64
; SI: ds_write2_b64

; VI: ds_read_b128
; VI: ds_write_b128

; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_scalarized_v2f64_local_load_store(ptr addrspace(3) nocapture %x, ptr addrspace(3) nocapture %y) nounwind {
  %tmp1 = load <2 x double>, ptr addrspace(3) %x, align 16
  %tmp4 = load <2 x double>, ptr addrspace(3) %y, align 16
  store <2 x double> %tmp4, ptr addrspace(3) %x, align 16
  store <2 x double> %tmp1, ptr addrspace(3) %y, align 16
  ret void
}

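; A 32-byte vector is wider than the widest buffer operation, so each
; <8 x i32> access is split into two dwordx4 halves; the no-reorder
; constraint still applies to every piece.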
; GCN-LABEL: {{^}}no_reorder_split_v8i32_global_load_store:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_split_v8i32_global_load_store(ptr addrspace(1) nocapture %x, ptr addrspace(1) nocapture %y) nounwind {
  %tmp1 = load <8 x i32>, ptr addrspace(1) %x, align 32
  %tmp4 = load <8 x i32>, ptr addrspace(1) %y, align 32
  store <8 x i32> %tmp4, ptr addrspace(1) %x, align 32
  store <8 x i32> %tmp1, ptr addrspace(1) %y, align 32
  ret void
}

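; The zext/add/trunc chain folds to a 32-bit add, so only the two original
; <2 x i32> LDS loads should survive; the GCN-NOT line verifies no extra
; ds_read is scheduled between the two writes.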
; GCN-LABEL: {{^}}no_reorder_extload_64:
; GCN: ds_read_b64
; GCN: ds_read_b64
; GCN: ds_write_b64
; GCN-NOT: ds_read
; GCN: ds_write_b64
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_extload_64(ptr addrspace(3) nocapture %x, ptr addrspace(3) nocapture %y) nounwind {
  %tmp1 = load <2 x i32>, ptr addrspace(3) %x, align 8
  %tmp4 = load <2 x i32>, ptr addrspace(3) %y, align 8
  %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
  %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
  %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>
  %tmp9 = add <2 x i64> %tmp4ext, <i64 1, i64 1>
  %trunctmp9 = trunc <2 x i64> %tmp9 to <2 x i32>
  %trunctmp7 = trunc <2 x i64> %tmp7 to <2 x i32>
  store <2 x i32> %trunctmp9, ptr addrspace(3) %x, align 8
  store <2 x i32> %trunctmp7, ptr addrspace(3) %y, align 8
  ret void
}