about summary refs log tree commit diff
path: root/llvm/test/CodeGen/DirectX/BufferLoad.ll
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen/DirectX/BufferLoad.ll')
-rw-r--r-- llvm/test/CodeGen/DirectX/BufferLoad.ll | 29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/llvm/test/CodeGen/DirectX/BufferLoad.ll b/llvm/test/CodeGen/DirectX/BufferLoad.ll
index 7f1291b..86e2217 100644
--- a/llvm/test/CodeGen/DirectX/BufferLoad.ll
+++ b/llvm/test/CodeGen/DirectX/BufferLoad.ll
@@ -17,8 +17,9 @@ define void @loadv4f32() {
; CHECK-NOT: %dx.resource.casthandle
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load0 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 0)
+ %data0 = extractvalue {<4 x float>, i1} %load0, 0
; The extract order depends on the users, so don't enforce that here.
; CHECK-DAG: [[VAL0_0:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 0
@@ -34,8 +35,9 @@ define void @loadv4f32() {
call void @scalar_user(float %data0_2)
; CHECK: [[DATA4:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 4, i32 undef)
- %data4 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load4 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 4)
+ %data4 = extractvalue {<4 x float>, i1} %load4, 0
; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA4]], 0
; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA4]], 1
@@ -48,8 +50,9 @@ define void @loadv4f32() {
call void @vector_user(<4 x float> %data4)
; CHECK: [[DATA12:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 12, i32 undef)
- %data12 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load12 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 12)
+ %data12 = extractvalue {<4 x float>, i1} %load12, 0
; CHECK: [[DATA12_3:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA12]], 3
%data12_3 = extractelement <4 x float> %data12, i32 3
@@ -70,8 +73,9 @@ define void @index_dynamic(i32 %bufindex, i32 %elemindex) {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[LOAD:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 %bufindex, i32 undef)
- %load = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 %bufindex)
+ %data = extractvalue {<4 x float>, i1} %load, 0
; CHECK: [[ALLOCA:%.*]] = alloca [4 x float]
; CHECK: [[V0:%.*]] = extractvalue %dx.types.ResRet.f32 [[LOAD]], 0
@@ -89,10 +93,10 @@ define void @index_dynamic(i32 %bufindex, i32 %elemindex) {
;
; CHECK: [[PTR:%.*]] = getelementptr inbounds [4 x float], ptr [[ALLOCA]], i32 0, i32 %elemindex
; CHECK: [[X:%.*]] = load float, ptr [[PTR]]
- %data = extractelement <4 x float> %load, i32 %elemindex
+ %x = extractelement <4 x float> %data, i32 %elemindex
; CHECK: call void @scalar_user(float [[X]])
- call void @scalar_user(float %data)
+ call void @scalar_user(float %x)
ret void
}
@@ -105,8 +109,9 @@ define void @loadf32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call float @llvm.dx.resource.load.typedbuffer(
+ %load0 = call {float, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", float, 0, 0, 0) %buffer, i32 0)
+ %data0 = extractvalue {float, i1} %load0, 0
; CHECK: [[VAL0:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 0
; CHECK: call void @scalar_user(float [[VAL0]])
@@ -123,7 +128,7 @@ define void @loadv2f32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <2 x float> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<2 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <2 x float>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -137,7 +142,7 @@ define void @loadv4f32_checkbit() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call {<4 x float>, i1} @llvm.dx.resource.loadchecked.typedbuffer.f32(
+ %data0 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer.f32(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 0)
; CHECK: [[STATUS:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 4
@@ -158,7 +163,7 @@ define void @loadv4i32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.i32 @dx.op.bufferLoad.i32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x i32> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x i32>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x i32>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -172,7 +177,7 @@ define void @loadv4f16() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f16 @dx.op.bufferLoad.f16(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x half> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x half>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x half>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -186,7 +191,7 @@ define void @loadv4i16() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.i16 @dx.op.bufferLoad.i16(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x i16> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x i16>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x i16>, 0, 0, 0) %buffer, i32 0)
ret void