aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/AMDGPU/packetizer.ll
blob: b9bf13886d366658acffd15bb1b526b276de9aef (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s -check-prefix=R600
; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s -check-prefix=CM

; Packetizer test: four independent rotate-right computations (each an
; or-of-shl/lshr pair, lowered to BIT_ALIGN_INT on R600/Cayman) plus their
; OR-reduction should be packed into VLIW ALU groups — note the groups of
; instructions sharing a cycle (the `*` marks the last slot of a packet)
; in the autogenerated CHECK lines below.
; NOTE(review): the CHECK blocks are maintained by update_llc_test_checks.py;
; do not hand-edit them — regenerate with the UTC_ARGS recorded at the top.
define amdgpu_kernel void @test(ptr addrspace(1) %out, i32 %x_arg, i32 %y_arg, i32 %z_arg, i32 %w_arg, i32 %e) {
; R600-LABEL: test:
; R600:       ; %bb.0: ; %entry
; R600-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT:    CF_END
; R600-NEXT:    PAD
; R600-NEXT:    ALU clause starting at 4:
; R600-NEXT:     ADD_INT T0.Y, KC0[3].X, 1,
; R600-NEXT:     ADD_INT T0.Z, KC0[3].Y, 1,
; R600-NEXT:     ADD_INT T0.W, KC0[2].Z, 1,
; R600-NEXT:     ADD_INT * T1.W, KC0[2].W, 1,
; R600-NEXT:     BIT_ALIGN_INT T0.X, PS, PS, KC0[3].Z,
; R600-NEXT:     BIT_ALIGN_INT T1.Y, PV.W, PV.W, KC0[3].Z,
; R600-NEXT:     BIT_ALIGN_INT T0.Z, PV.Z, PV.Z, KC0[3].Z,
; R600-NEXT:     BIT_ALIGN_INT * T0.W, PV.Y, PV.Y, KC0[3].Z,
; R600-NEXT:     OR_INT T0.W, PV.W, PV.Z,
; R600-NEXT:     OR_INT * T1.W, PV.Y, PV.X,
; R600-NEXT:     OR_INT T0.X, PS, PV.W,
; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
;
; CM-LABEL: test:
; CM:       ; %bb.0: ; %entry
; CM-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
; CM-NEXT:    MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
; CM-NEXT:    CF_END
; CM-NEXT:    PAD
; CM-NEXT:    ALU clause starting at 4:
; CM-NEXT:     ADD_INT T0.X, KC0[3].X, 1,
; CM-NEXT:     ADD_INT T0.Y, KC0[3].Y, 1,
; CM-NEXT:     ADD_INT T0.Z, KC0[2].Z, 1,
; CM-NEXT:     ADD_INT * T0.W, KC0[2].W, 1,
; CM-NEXT:     BIT_ALIGN_INT T1.X, PV.W, PV.W, KC0[3].Z,
; CM-NEXT:     BIT_ALIGN_INT T1.Y, PV.Z, PV.Z, KC0[3].Z,
; CM-NEXT:     BIT_ALIGN_INT T0.Z, PV.Y, PV.Y, KC0[3].Z,
; CM-NEXT:     BIT_ALIGN_INT * T0.W, PV.X, PV.X, KC0[3].Z,
; CM-NEXT:     OR_INT T0.Z, PV.W, PV.Z,
; CM-NEXT:     OR_INT * T0.W, PV.Y, PV.X,
; CM-NEXT:     OR_INT * T0.X, PV.W, PV.Z,
; CM-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT:    2(2.802597e-45), 0(0.000000e+00)
entry:
  ; Shared shift amount: 32 - %e, so each (shl by %shl | lshr by %e) pair
  ; below forms a rotate-right-by-%e of its operand (assuming %e in (0,32);
  ; backend recognizes the pattern as BIT_ALIGN_INT).
  %shl = sub i32 32, %e
  ; rot(x_arg + 1)
  %x = add i32 %x_arg, 1
  %x.0 = shl i32 %x, %shl
  %x.1 = lshr i32 %x, %e
  %x.2 = or i32 %x.0, %x.1
  ; rot(y_arg + 1)
  %y = add i32 %y_arg, 1
  %y.0 = shl i32 %y, %shl
  %y.1 = lshr i32 %y, %e
  %y.2 = or i32 %y.0, %y.1
  ; rot(z_arg + 1)
  %z = add i32 %z_arg, 1
  %z.0 = shl i32 %z, %shl
  %z.1 = lshr i32 %z, %e
  %z.2 = or i32 %z.0, %z.1
  ; rot(w_arg + 1)
  %w = add i32 %w_arg, 1
  %w.0 = shl i32 %w, %shl
  %w.1 = lshr i32 %w, %e
  %w.2 = or i32 %w.0, %w.1
  ; OR-reduce the four independent results and store the scalar.
  %xy = or i32 %x.2, %y.2
  %zw = or i32 %z.2, %w.2
  %xyzw = or i32 %xy, %zw
  store i32 %xyzw, ptr addrspace(1) %out
  ret void
}