// clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu verde -emit-llvm -disable-llvm-optzns -fcuda-is-device -o - %s | FileCheck %s

#define __device__ __attribute__((device))
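// Check that __builtin_amdgcn_make_buffer_rsrc is lowered to the
// llvm.amdgcn.make.buffer.rsrc.p8.p0 intrinsic returning a ptr addrspace(8)
// buffer resource, both when every argument is a runtime value and when the
// stride, num-records, or flags operand is a compile-time constant.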

// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z31test_amdgcn_make_buffer_rsrc_p0Pvsii(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[NUM:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT:    [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT:    [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT:    [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT:    [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT:    [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT:    ret ptr addrspace(8) [[TMP4]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, num, flags);
}

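// A constant stride is passed directly to the intrinsic as an i16 immediate.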
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z47test_amdgcn_make_buffer_rsrc_p0_stride_constantPvii(
// CHECK-SAME: ptr noundef [[P:%.*]], i32 noundef [[NUM:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT:    [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT:    [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT:    [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 [[TMP2]])
// CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, /*stride=*/4, num, flags);
}

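// A constant num-records argument is passed directly as an i32 immediate.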
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z44test_amdgcn_make_buffer_rsrc_p0_num_constantPvsi(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT:    [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT:    [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT:    [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT:    [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 [[TMP2]])
// CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, /*num=*/1234, flags);
}

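// A constant flags argument is passed directly as the final i32 immediate.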
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z46test_amdgcn_make_buffer_rsrc_p0_flags_constantPvsi(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[NUM:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT:    [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT:    [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT:    [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT:    [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 5678)
// CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, num, /*flags=*/5678);
}