author    Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
commit    3dbeefa978fb7e7b231b249f9cd90c67b9e83277
tree      d74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/CodeGen/AMDGPU/udiv.ll
parent    f6021ecddc73d14c94ad70938250d58f330795be
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently, functions with the default C calling convention are treated the same as compute kernels. Make this explicit so the default calling convention can later be changed to a non-kernel convention.

Converted with perl -pi -e 's/define void/define amdgpu_kernel void/' on the relevant test directories (and undoing it in one place that actually wanted a non-kernel function).

llvm-svn: 298444
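For reference, a minimal sketch of the mechanical rewrite described above, run from the repository root. The directory argument is an assumption for illustration; the commit message only says "the relevant test directories", not the exact set of paths:

    # Illustrative invocation; the real change covered multiple test directories.
    perl -pi -e 's/define void/define amdgpu_kernel void/' llvm/test/CodeGen/AMDGPU/*.ll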
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/udiv.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv.ll | 28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index ed791bc..2874a0cd 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -10,7 +10,7 @@
; EG: CF_END
; SI: v_rcp_iflag_f32_e32
-define void @udiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -21,7 +21,7 @@ define void @udiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}s_udiv_i32:
; SI: v_rcp_iflag_f32_e32
-define void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = udiv i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
@@ -38,7 +38,7 @@ define void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; SI: v_rcp_iflag_f32_e32
; SI: v_rcp_iflag_f32_e32
; SI: s_endpgm
-define void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -50,7 +50,7 @@ define void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
; FUNC-LABEL: {{^}}udiv_v4i32:
; EG: CF_END
; SI: s_endpgm
-define void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -63,7 +63,7 @@ define void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
; SI: buffer_load_dword [[VAL:v[0-9]+]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 4, [[VAL]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 16
@@ -77,7 +77,7 @@ define void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_mul_hi_u32 [[MULHI:v[0-9]+]], [[K]], [[VAL]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 25, [[MULHI]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 34259182
@@ -91,7 +91,7 @@ define void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; SI: v_mul_hi_u32 [[MULHI:v[0-9]+]], [[K]], [[VAL]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 24, [[MULHI]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 34259183
@@ -103,7 +103,7 @@ define void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0xff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -117,7 +117,7 @@ define void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0xffff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in
%den = load i16, i16 addrspace(1) * %den_ptr
@@ -131,7 +131,7 @@ define void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
%den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1
%num = load i23, i23 addrspace(1) * %in
%den = load i23, i23 addrspace(1) * %den_ptr
@@ -143,7 +143,7 @@ define void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}v_udiv_i24:
; SI-NOT: v_rcp_f32
-define void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
%den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1
%num = load i24, i24 addrspace(1) * %in
%den = load i24, i24 addrspace(1) * %den_ptr
@@ -159,7 +159,7 @@ define void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
; SI: v_mul_hi_u32
; SI: v_mul_hi_u32
-define void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
+define amdgpu_kernel void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
%1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
%2 = udiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668>
store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16
@@ -168,7 +168,7 @@ define void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %i
; FUNC-LABEL: {{^}}test_udiv2:
; SI: s_lshr_b32 s{{[0-9]}}, s{{[0-9]}}, 1
-define void @test_udiv2(i32 %p) {
+define amdgpu_kernel void @test_udiv2(i32 %p) {
%i = udiv i32 %p, 2
store volatile i32 %i, i32 addrspace(1)* undef
ret void
@@ -178,7 +178,7 @@ define void @test_udiv2(i32 %p) {
; SI: v_mov_b32_e32 v{{[0-9]+}}, 0xaaaaaaab
; SI: v_mul_hi_u32 v0, {{v[0-9]+}}, {{s[0-9]+}}
; SI-NEXT: v_lshrrev_b32_e32 v0, 1, v0
-define void @test_udiv_3_mulhu(i32 %p) {
+define amdgpu_kernel void @test_udiv_3_mulhu(i32 %p) {
%i = udiv i32 %p, 3
store volatile i32 %i, i32 addrspace(1)* undef
ret void