Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16-math.ll                      |   21
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll                           |    2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll           |  116
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll               | 1524
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll   |   13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll                   |    5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll                |   14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll      |  195
-rw-r--r--  llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll          |   47
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wqm.mir                           |  277
10 files changed, 1919 insertions(+), 295 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll index 3a82f84..30a7864 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll @@ -370,9 +370,6 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) { ; GCN: ; %bb.0: ; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp ; GCN-NEXT: ; return to shader part epilog - - - %exp = call bfloat @llvm.exp2.bf16(bfloat %src) %max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0) %clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0) @@ -384,9 +381,6 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa ; GCN: ; %bb.0: ; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp ; GCN-NEXT: ; return to shader part epilog - - - %mul = fmul <2 x bfloat> %src0, %src1 %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>) %clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>) @@ -400,9 +394,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -415,9 +406,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -432,9 +420,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -447,9 +432,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0] ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5> store <2 x bfloat> %add, ptr addrspace(1) %out @@ -464,9 +446,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfl ; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm - - - %mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0> %add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0> store <2 x bfloat> %add, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 8f8ea13..505ddc8 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -24671,7 +24671,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) { ret <32 x bfloat> %op } - declare bfloat @llvm.maxnum.bf16(bfloat, bfloat) declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat>, <2 x bfloat>) declare <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat>, <3 x bfloat>) @@ -29673,7 +29672,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) { ret { bfloat, i16 } %op } - declare bfloat @llvm.log.bf16(bfloat) declare bfloat @llvm.log2.bf16(bfloat) declare bfloat 
@llvm.log10.bf16(bfloat) diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll index f58cb84..839d0ba 100644 --- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll +++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll @@ -38,11 +38,11 @@ define hidden void @copy(ptr noundef %va) { ; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr ; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr ; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false) ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: ret void ; entry: @@ -51,43 +51,43 @@ entry: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr store ptr %va, ptr addrspace(5) %va.addr, align 8 - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) call void @llvm.va_copy.p0(ptr %cp.ascast, ptr nonnull %va.addr.ascast) %0 = load ptr, ptr addrspace(5) %cp, align 8 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ret void } -declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture) declare void @llvm.va_copy.p0(ptr, ptr) declare hidden void @valist(ptr noundef) -declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture) define hidden void @start_once(...) { ; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) ; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ; CHECK-NEXT: ret void ; entry: %s = alloca ptr, align 8, addrspace(5) %s.ascast = addrspacecast ptr addrspace(5) %s to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) call void @llvm.va_start.p0(ptr %s.ascast) %0 = load ptr, ptr addrspace(5) %s, align 8 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ret void } @@ -102,16 +102,16 @@ define hidden void @start_twice(...) 
{ ; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr ; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) ; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) ; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8 ; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0) ; CHECK-NEXT: ret void ; entry: @@ -119,8 +119,8 @@ entry: %s1 = alloca ptr, align 8, addrspace(5) %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) call void @llvm.va_start.p0(ptr %s0.ascast) %0 = load ptr, ptr addrspace(5) %s0, align 8 call void @valist(ptr noundef %0) @@ -129,8 +129,8 @@ entry: %1 = load ptr, ptr addrspace(5) %s1, align 8 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0) ret void } @@ -138,12 +138,12 @@ define hidden void @single_i32(i32 noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -157,12 +157,12 @@ define hidden void @single_double(double noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) 
%0, align 8 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -174,12 +174,12 @@ define hidden void @single_v4f32(<4 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -191,12 +191,12 @@ define hidden void @single_v8f32(<8 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -208,12 +208,12 @@ define hidden void @single_v16f32(<16 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -225,12 +225,12 @@ define hidden void @single_v32f32(<32 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr 
inbounds nuw %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -242,14 +242,14 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -261,14 +261,14 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -286,14 +286,14 @@ define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) 
%1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -317,14 +317,14 @@ define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -342,14 +342,14 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -361,14 +361,14 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void 
@vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -380,14 +380,14 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -399,14 +399,14 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -418,14 +418,14 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ 
-437,14 +437,14 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -456,14 +456,14 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -475,14 +475,14 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -495,12 +495,12 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5) ; CHECK-NEXT: %0 = load 
volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -520,12 +520,12 @@ define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i6 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll index 462090c..0a2e7af 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll @@ -1,12 +1,46 @@ -; RUN: llc -mtriple=amdgcn -mcpu=verde < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX12 %s -; GCN-LABEL: {{^}}gs_const: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @gs_const() { +; SI-LABEL: gs_const: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: gs_const: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; 
GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: gs_const: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %tmp = icmp ule i32 0, 3 %tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00 %c1 = fcmp oge float %tmp1, 0.0 @@ -19,12 +53,81 @@ define amdgpu_gs void @gs_const() { ret void } -; GCN-LABEL: {{^}}vcc_implicit_def: -; GCN: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}} -; GCN: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}} -; GCN: s_and{{n2|_not1}}_b64 exec, exec, vcc -; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]] define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { +; SI-LABEL: vcc_implicit_def: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; SI-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB1_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; SI-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB1_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: vcc_implicit_def: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB1_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: vcc_implicit_def: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: exp mrt1 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB1_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: vcc_implicit_def: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e64 s[0:1], 0, v1 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, vcc +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: export mrt1 v0, v0, v0, v0 done +; GFX12-NEXT: 
s_endpgm +; GFX12-NEXT: .LBB1_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %tmp0 = fcmp olt float %arg13, 0.000000e+00 %c1 = fcmp oge float %arg14, 0.0 call void @llvm.amdgcn.kill(i1 %c1) @@ -34,31 +137,102 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { ret void } -; GCN-LABEL: {{^}}true: -; GCN-NEXT: %bb. -; GCN-NEXT: s_endpgm define amdgpu_gs void @true() { +; GCN-LABEL: true: +; GCN: ; %bb.0: +; GCN-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 true) ret void } -; GCN-LABEL: {{^}}false: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @false() { +; SI-LABEL: false: +; SI: ; %bb.0: +; SI-NEXT: s_andn2_b64 exec, exec, exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: false: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_andn2_b64 exec, exec, exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: false: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_and_not1_b64 exec, exec, exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 false) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}and: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_or_b64 s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], exec, s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: and: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: and: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: and: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg 
sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = or i1 %c1, %c2 @@ -67,13 +241,52 @@ define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}andn2: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_xor_b64 s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: andn2: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: andn2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: andn2: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = xor i1 %c1, %c2 @@ -83,135 +296,854 @@ define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}oeq: -; GCN: v_cmp_neq_f32 +; Should use v_cmp_neq_f32 define amdgpu_gs void @oeq(float %a) { +; SI-LABEL: oeq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oeq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oeq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oeq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: 
s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oeq float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ogt: -; GCN: v_cmp_nlt_f32 +; Should use v_cmp_nlt_f32 define amdgpu_gs void @ogt(float %a) { +; SI-LABEL: ogt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ogt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ogt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ogt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ogt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}oge: -; GCN: v_cmp_nle_f32 +; Should use v_cmp_nle_f32 define amdgpu_gs void @oge(float %a) { +; SI-LABEL: oge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: 
s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oge float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}olt: -; GCN: v_cmp_ngt_f32 +; Should use v_cmp_ngt_f32 define amdgpu_gs void @olt(float %a) { +; SI-LABEL: olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ole: -; GCN: v_cmp_nge_f32 +; Should use v_cmp_nge_f32 define amdgpu_gs void @ole(float %a) { +; SI-LABEL: ole: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ole: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ole: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ole: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: 
s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ole float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}one: -; GCN: v_cmp_nlg_f32 +; Should use v_cmp_nlg_f32 define amdgpu_gs void @one(float %a) { +; SI-LABEL: one: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: one: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: one: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: one: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp one float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ord: -; GCN: v_cmp_o_f32 +; Should use v_cmp_o_f32 define amdgpu_gs void @ord(float %a) { +; SI-LABEL: ord: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ord: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: ord: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; 
GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp ord float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uno: -; GCN: v_cmp_u_f32 +; Should use v_cmp_u_f32 define amdgpu_gs void @uno(float %a) { +; SI-LABEL: uno: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uno: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: uno: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp uno float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ueq: -; GCN: v_cmp_lg_f32 +; Should use v_cmp_lg_f32 define amdgpu_gs void @ueq(float %a) { +; SI-LABEL: ueq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ueq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ueq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ueq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: 
s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ueq float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ugt: -; GCN: v_cmp_ge_f32 +; Should use v_cmp_ge_f32 define amdgpu_gs void @ugt(float %a) { +; SI-LABEL: ugt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ugt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ugt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ugt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ugt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uge: -; GCN: v_cmp_gt_f32_e32 vcc, -1.0 +; Should use v_cmp_gt_f32_e32 vcc, -1.0 define amdgpu_gs void @uge(float %a) { +; SI-LABEL: uge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: uge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: uge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, -1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; 
GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp uge float %a, -1.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ult: -; GCN: v_cmp_le_f32_e32 vcc, -2.0 +; Should use v_cmp_le_f32_e32 vcc, -2.0 define amdgpu_gs void @ult(float %a) { +; SI-LABEL: ult: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ult: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ult: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ult: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, -2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ult float %a, -2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ule: -; GCN: v_cmp_lt_f32_e32 vcc, 2.0 +; Should use v_cmp_lt_f32_e32 vcc, 2.0 define amdgpu_gs void @ule(float %a) { +; SI-LABEL: ule: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ule: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ule: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ule: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc, 2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; 
GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ule float %a, 2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}une: -; GCN: v_cmp_eq_f32_e32 vcc, 0 +; Should use v_cmp_eq_f32_e32 vcc, 0 define amdgpu_gs void @une(float %a) { +; SI-LABEL: une: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: une: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: une: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: une: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp une float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}neg_olt: -; GCN: v_cmp_gt_f32_e32 vcc, 1.0 +; Should use v_cmp_gt_f32_e32 vcc, 1.0 define amdgpu_gs void @neg_olt(float %a) { +; SI-LABEL: neg_olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: neg_olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: neg_olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: neg_olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; 
GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 1.0 %c2 = xor i1 %c1, 1 call void @llvm.amdgcn.kill(i1 %c2) @@ -219,13 +1151,61 @@ define amdgpu_gs void @neg_olt(float %a) { ret void } -; GCN-LABEL: {{^}}fcmp_x2: ; FIXME: LLVM should be able to combine these fcmp opcodes. -; SI: v_cmp_lt_f32_e32 vcc, s{{[0-9]+}}, v0 -; GFX10: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 -; GCN: v_cndmask_b32 -; GCN: v_cmp_nle_f32 define amdgpu_ps void @fcmp_x2(float %a) #0 { +; SI-LABEL: fcmp_x2: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b32 s0, 0x3e800000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB21_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB21_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: fcmp_x2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB21_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: fcmp_x2: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB21_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_x2: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB21_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %ogt = fcmp nsz ogt float %a, 2.500000e-01 %k = select i1 %ogt, float -1.000000e+00, float 0.000000e+00 %c = fcmp nsz oge float %k, 0.000000e+00 @@ -234,14 +1214,78 @@ define amdgpu_ps void @fcmp_x2(float %a) #0 { } ; Note: an almost identical test for this exists in llvm.amdgcn.wqm.vote.ll -; GCN-LABEL: {{^}}wqm: -; GCN: v_cmp_neq_f32_e32 vcc, 0 -; GCN-DAG: s_wqm_b64 s[2:3], vcc -; GCN-DAG: s_mov_b64 s[0:1], exec -; GCN: s_and{{n2|_not1}}_b64 s[2:3], exec, s[2:3] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], s[0:1], s[2:3] -; GCN: s_and_b64 exec, exec, s[0:1] define amdgpu_ps float @wqm(float %a) { +; SI-LABEL: wqm: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_wqm_b64 s[2:3], vcc +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_cbranch_scc0 .LBB22_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 0 +; SI-NEXT: s_branch .LBB22_3 +; SI-NEXT: .LBB22_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; 
SI-NEXT: .LBB22_3: +; +; GFX10-LABEL: wqm: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_wqm_b64 s[2:3], vcc +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-NEXT: s_branch .LBB22_3 +; GFX10-NEXT: .LBB22_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB22_3: +; +; GFX11-LABEL: wqm: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_mov_b64 s[0:1], exec +; GFX11-NEXT: s_wqm_b64 s[2:3], vcc +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_branch .LBB22_3 +; GFX11-NEXT: .LBB22_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB22_3: +; +; GFX12-LABEL: wqm: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_wqm_b64 s[2:3], vcc +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: v_mov_b32_e32 v0, 0 +; GFX12-NEXT: s_branch .LBB22_3 +; GFX12-NEXT: .LBB22_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB22_3: %c1 = fcmp une float %a, 0.0 %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1) call void @llvm.amdgcn.kill(i1 %c2) @@ -249,28 +1293,212 @@ define amdgpu_ps float @wqm(float %a) { } ; This checks that we use the 64-bit encoding when the operand is a SGPR. 
-; GCN-LABEL: {{^}}test_sgpr: -; GCN: v_cmp_nle_f32_e64 define amdgpu_ps void @test_sgpr(float inreg %a) #0 { +; SI-LABEL: test_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB23_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB23_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB23_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB23_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 1.0 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB23_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.000000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_non_inline_imm_sgpr: -; GCN-NOT: v_cmp_le_f32_e64 define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 { +; SI-LABEL: test_non_inline_imm_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_le_f32_e32 vcc, s0, v0 +; SI-NEXT: s_andn2_b64 s[0:1], exec, vcc +; SI-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; SI-NEXT: s_cbranch_scc0 .LBB24_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB24_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_non_inline_imm_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; GFX10-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB24_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_non_inline_imm_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX11-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX11-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB24_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_non_inline_imm_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB24_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.500000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_scc_liveness: -; GCN: s_cmp -; GCN: 
s_and_b64 exec -; GCN: s_cmp -; GCN: s_cbranch_scc define amdgpu_ps void @test_scc_liveness() #0 { +; SI-LABEL: test_scc_liveness: +; SI: ; %bb.0: ; %main_body +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: .LBB25_1: ; %loop3 +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_cmp_gt_i32 s2, 0 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; SI-NEXT: s_cbranch_scc0 .LBB25_4 +; SI-NEXT: ; %bb.2: ; %loop3 +; SI-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_add_i32 s3, s2, 1 +; SI-NEXT: s_cmp_lt_i32 s2, 1 +; SI-NEXT: s_mov_b32 s2, s3 +; SI-NEXT: s_cbranch_scc1 .LBB25_1 +; SI-NEXT: ; %bb.3: ; %endloop15 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB25_4: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_scc_liveness: +; GFX10: ; %bb.0: ; %main_body +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_mov_b32 s2, 0 +; GFX10-NEXT: .LBB25_1: ; %loop3 +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_cmp_gt_i32 s2, 0 +; GFX10-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX10-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; GFX10-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX10-NEXT: ; %bb.2: ; %loop3 +; GFX10-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_add_i32 s3, s2, 1 +; GFX10-NEXT: s_cmp_lt_i32 s2, 1 +; GFX10-NEXT: s_mov_b32 s2, s3 +; GFX10-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX10-NEXT: ; %bb.3: ; %endloop15 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB25_4: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_scc_liveness: +; GFX11: ; %bb.0: ; %main_body +; GFX11-NEXT: s_mov_b64 s[0:1], exec +; GFX11-NEXT: s_mov_b32 s2, 0 +; GFX11-NEXT: .LBB25_1: ; %loop3 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_cmp_gt_i32 s2, 0 +; GFX11-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX11-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX11-NEXT: ; %bb.2: ; %loop3 +; GFX11-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_add_i32 s3, s2, 1 +; GFX11-NEXT: s_cmp_lt_i32 s2, 1 +; GFX11-NEXT: s_mov_b32 s2, s3 +; GFX11-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX11-NEXT: ; %bb.3: ; %endloop15 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB25_4: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_scc_liveness: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_mov_b32 s2, 0 +; GFX12-NEXT: .LBB25_1: ; %loop3 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_cmp_gt_i32 s2, 0 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX12-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX12-NEXT: ; %bb.2: ; %loop3 +; GFX12-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_add_co_i32 s3, s2, 1 +; GFX12-NEXT: s_cmp_lt_i32 s2, 1 +; GFX12-NEXT: s_mov_b32 s2, s3 +; GFX12-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX12-NEXT: ; %bb.3: ; %endloop15 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB25_4: +; GFX12-NEXT: s_mov_b64 exec, 0 +; 
GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm main_body: br label %loop3 @@ -287,11 +1515,139 @@ endloop15: ; preds = %loop3 ; Check this compiles. ; If kill is marked as defining VCC then this will fail with live interval issues. -; GCN-LABEL: {{^}}kill_with_loop_exit: -; GCN: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec -; GCN: s_and{{n2|_not1}}_b64 [[LIVE]], [[LIVE]], exec -; GCN-NEXT: s_cbranch_scc0 define amdgpu_ps void @kill_with_loop_exit(float inreg %inp0, float inreg %inp1, <4 x i32> inreg %inp2, float inreg %inp3) { +; SI-LABEL: kill_with_loop_exit: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_mov_b32_e32 v0, 0x43000000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s1, v0 +; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 1.0 +; SI-NEXT: s_cbranch_vccnz .LBB26_5 +; SI-NEXT: ; %bb.1: ; %.preheader1.preheader +; SI-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; SI-NEXT: .LBB26_2: ; %bb +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; SI-NEXT: s_cbranch_vccnz .LBB26_2 +; SI-NEXT: ; %bb.3: ; %bb33 +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; SI-NEXT: s_cbranch_scc0 .LBB26_6 +; SI-NEXT: ; %bb.4: ; %bb33 +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: .LBB26_5: ; %bb35 +; SI-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB26_6: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: kill_with_loop_exit: +; GFX10: ; %bb.0: ; %.entry +; GFX10-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX10-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX10-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX10-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX10-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX10-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX10-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX10-NEXT: .LBB26_2: ; %bb +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX10-NEXT: ; %bb.3: ; %bb33 +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; GFX10-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX10-NEXT: ; %bb.4: ; %bb33 +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: .LBB26_5: ; %bb35 +; GFX10-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB26_6: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: kill_with_loop_exit: +; GFX11: ; %bb.0: ; %.entry +; GFX11-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX11-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX11-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX11-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX11-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX11-NEXT: s_mov_b64 s[2:3], exec +; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 
1, s[0:1] +; GFX11-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX11-NEXT: .LBB26_2: ; %bb +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX11-NEXT: ; %bb.3: ; %bb33 +; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX11-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX11-NEXT: ; %bb.4: ; %bb33 +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: .LBB26_5: ; %bb35 +; GFX11-NEXT: exp mrt0 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB26_6: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: kill_with_loop_exit: +; GFX12: ; %bb.0: ; %.entry +; GFX12-NEXT: s_cmp_lt_f32 s0, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_cmp_lt_f32 s1, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX12-NEXT: s_mov_b32 s4, 1.0 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX12-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX12-NEXT: s_cmp_ngt_f32 s6, 0 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: s_mov_b32 s4, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] +; GFX12-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 +; GFX12-NEXT: .LBB26_2: ; %bb +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_add_f32 s4, s4, 0x3e800000 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX12-NEXT: ; %bb.3: ; %bb33 +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX12-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX12-NEXT: ; %bb.4: ; %bb33 +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: .LBB26_5: ; %bb35 +; GFX12-NEXT: v_mov_b32_e32 v0, s4 +; GFX12-NEXT: export mrt0 v0, v0, v0, v0 done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB26_6: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm .entry: %tmp24 = fcmp olt float %inp0, 1.280000e+02 %tmp25 = fcmp olt float %inp1, 1.280000e+02 diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll index 91a8446..13ea8b0 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll @@ -18,10 +18,9 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 ; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_or_b32 s1, s2, s3 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_or_b32 s0, s0, s1 -; GFX11-NEXT: v_mov_b32_e32 v2, s0 +; GFX11-NEXT: v_mov_b32_e32 v2, s3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0 ; GFX11-NEXT: global_store_b32 v[0:1], v2, off ; GFX11-NEXT: s_endpgm ; @@ -34,14 +33,12 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo ; GFX12-NEXT: v_readfirstlane_b32 s0, v2 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_readfirstlane_b32 
s1, v3 ; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_or_b32 s0, s0, s1 -; GFX12-NEXT: s_or_b32 s0, s2, s0 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2 ; GFX12-NEXT: global_store_b32 v[0:1], v2, off ; GFX12-NEXT: s_endpgm bb: diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll index 11cda2d..c96ba75 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll @@ -199,7 +199,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_negabsf32(bfloat %src0, bfloat %src1, ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_f32imm1(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_f32imm1: ; GFX1250: ; %bb.0: @@ -230,7 +229,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_f32imminv2pi(bfloat %src0, bfloat %src ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi: ; GFX1250: ; %bb.0: @@ -247,7 +245,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63: ; GFX1250: ; %bb.0: @@ -360,7 +357,6 @@ define float @no_mix_simple_fabs(float %src0, float %src1, float %src2) #0 { ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals(bfloat %src0, bfloat %src1, bfloat %src2) #1 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals: ; GFX1250: ; %bb.0: @@ -469,7 +465,6 @@ define float @v_mad_mix_f32_negprecvtbf16lo_bf16lo_bf16lo(i32 %src0.arg, bfloat ret float %result } - define float @v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo(i32 %src0.arg, bfloat %src1, bfloat %src2) #0 { ; GFX1250-LABEL: v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll index 4393172..03304ae 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll @@ -76,9 +76,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] - - - %src0.ext = fpext bfloat %src0 to float %src1.ext = fpext bfloat %src1 to float %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2) @@ -106,7 +103,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_pre_cvt(bfloat %src0, bf ret bfloat %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32: ; GFX1250: ; %bb.0: @@ -179,7 +175,6 @@ define <4 x bfloat> @v_mad_mix_v4f32(<4 x bfloat> %src0, <4 x bfloat> %src1, <4 ret <4 x bfloat> %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -194,9 +189,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1] ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp ; GFX1250-NEXT: 
s_set_pc_i64 s[30:31] - - - %src0.ext = fpext <2 x bfloat> %src0 to <2 x float> %src1.ext = fpext <2 x bfloat> %src1 to <2 x float> %src2.ext = fpext <2 x bfloat> %src2 to <2 x float> @@ -207,7 +199,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ret <2 x bfloat> %clamp } - define <3 x bfloat> @v_mad_mix_v3f32_clamp_postcvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -252,9 +243,6 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp ; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] - - - %src0.ext = fpext <4 x bfloat> %src0 to <4 x float> %src1.ext = fpext <4 x bfloat> %src1 to <4 x float> %src2.ext = fpext <4 x bfloat> %src2 to <4 x float> @@ -325,7 +313,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x bfloat> %src0, <2 x b ret <2 x bfloat> %insert } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_precvt: ; GFX1250: ; %bb.0: @@ -353,7 +340,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloa ret <2 x bfloat> %cvt.result } - define <3 x bfloat> @v_mad_mix_v3f32_clamp_precvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_precvt: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll index 0b43ff2..b35a74e 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll @@ -200,8 +200,199 @@ bb: ret void } -declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1 -declare noundef i32 @llvm.amdgcn.workitem.id.x() #2 +; The inline asm requires the value be copied to an AGPR class, not +; the AV_* pseudo we usually expect for register allocator live range +; splits. 
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_to_agpr_class(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_rewrite_mfma_direct_copy_to_agpr_class: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 2.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 4.0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %in, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_imm_src2(float %arg0, float %arg1) #0 { +; CHECK-LABEL: test_rewrite_mfma_imm_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v0, v1, 2.0 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v1 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v2 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a4, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a5, v5 +; CHECK-NEXT: v_accvgpr_write_b32 a6, v6 +; CHECK-NEXT: v_accvgpr_write_b32 a7, v7 +; CHECK-NEXT: v_accvgpr_write_b32 a8, v8 +; CHECK-NEXT: v_accvgpr_write_b32 a9, v9 +; CHECK-NEXT: v_accvgpr_write_b32 a10, v10 +; CHECK-NEXT: v_accvgpr_write_b32 a11, v11 +; CHECK-NEXT: v_accvgpr_write_b32 a12, v12 +; CHECK-NEXT: v_accvgpr_write_b32 a13, v13 +; CHECK-NEXT: v_accvgpr_write_b32 a14, v14 +; CHECK-NEXT: v_accvgpr_write_b32 a15, v15 +; CHECK-NEXT: v_accvgpr_write_b32 a16, v16 +; CHECK-NEXT: v_accvgpr_write_b32 a17, v17 +; CHECK-NEXT: v_accvgpr_write_b32 a18, v18 +; CHECK-NEXT: v_accvgpr_write_b32 a19, v19 +; CHECK-NEXT: v_accvgpr_write_b32 a20, v20 +; CHECK-NEXT: v_accvgpr_write_b32 a21, v21 +; CHECK-NEXT: v_accvgpr_write_b32 a22, v22 +; CHECK-NEXT: v_accvgpr_write_b32 a23, v23 +; CHECK-NEXT: v_accvgpr_write_b32 a24, v24 +; CHECK-NEXT: v_accvgpr_write_b32 a25, v25 +; CHECK-NEXT: v_accvgpr_write_b32 a26, v26 +; CHECK-NEXT: v_accvgpr_write_b32 a27, v27 +; CHECK-NEXT: v_accvgpr_write_b32 a28, v28 +; CHECK-NEXT: v_accvgpr_write_b32 a29, v29 +; CHECK-NEXT: v_accvgpr_write_b32 a30, v30 +; CHECK-NEXT: v_accvgpr_write_b32 a31, v31 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> splat (float 2.0), i32 0, i32 0, i32 0) + call 
void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_subreg_extract0(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract0: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v2 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v5 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +define void @test_rewrite_mfma_subreg_extract1(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract1: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v6 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v7 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v8 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v9 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +; odd offset +define void @test_rewrite_mfma_subreg_extract2(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v5 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v6 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3 attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" } attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) } diff --git a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll b/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll deleted file mode 100644 index 726e35d..0000000 --- a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll +++ /dev/null @@ -1,47 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GCN %s - -declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32) - - -define amdgpu_kernel void @test_isel_single_lane(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 { -; GCN-LABEL: test_isel_single_lane: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b32 s4, s[0:1], 0x58 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 -; GCN-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: v_readfirstlane_b32 s0, v1 -; GCN-NEXT: s_addk_co_i32 s0, 0xf4 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s1, s0, 4 -; GCN-NEXT: s_mul_i32 s0, s0, s1 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s0, s0, 12 -; GCN-NEXT: s_sub_co_i32 s0, s1, s0 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: global_store_b32 v0, v1, s[2:3] -; GCN-NEXT: s_endpgm - %gep0 = getelementptr i32, ptr addrspace(1) %in, i32 22 - %val0 = load i32, ptr addrspace(1) %gep0, 
align 4 - %gep1 = getelementptr i32, ptr addrspace(1) %in, i32 4 - %val1 = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr addrspace(1) %gep1, i32 %val0) - %res0 = add i32 %val1, 244 - %res1 = shl i32 %res0, 4 - %res2 = mul i32 %res0, %res1 - %res3 = shl i32 %res2, 12 - %res4 = sub i32 %res1, %res3 - store i32 %res4, ptr addrspace(1) %out - ret void -} - - -attributes #0 = { - "amdgpu-flat-work-group-size"="1,1" - "amdgpu-waves-per-eu"="1,1" - "uniform-work-group-size"="true" -} diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir index 350b233..ceb1b3e 100644 --- a/llvm/test/CodeGen/AMDGPU/wqm.mir +++ b/llvm/test/CodeGen/AMDGPU/wqm.mir @@ -1,3 +1,4 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-wqm -o - %s | FileCheck %s # RUN: llc -mtriple=amdgcn -mcpu=fiji -passes=si-wqm -o - %s | FileCheck %s @@ -46,10 +47,6 @@ --- # Check for awareness that s_or_saveexec_b64 clobbers SCC -# -#CHECK: ENTER_STRICT_WWM -#CHECK: S_CMP_LT_I32 -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc alignment: 1 exposesReturnsTwice: false @@ -80,6 +77,21 @@ body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-LABEL: name: test_strict_wwm_scc + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 %3 = COPY $vgpr0 %2 = COPY $sgpr2 %1 = COPY $sgpr1 @@ -96,16 +108,35 @@ body: | --- # Second test for awareness that s_or_saveexec_b64 clobbers SCC # Because entry block is treated differently. 
-# -#CHECK: %bb.1 -#CHECK: S_CMP_LT_I32 -#CHECK: COPY $scc -#CHECK: ENTER_STRICT_WWM -#CHECK: $scc = COPY -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc2 tracksRegLiveness: true body: | + ; CHECK-LABEL: name: test_strict_wwm_scc2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY]], [[DEF]], 0, 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; CHECK-NEXT: [[ENTER_STRICT_WWM1:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: $scc = COPY [[COPY4]] + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM1]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: $vgpr1 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 @@ -130,7 +161,6 @@ body: | --- # V_SET_INACTIVE, when its second operand is undef, is replaced by a # COPY by si-wqm. Ensure the instruction is removed. 
-#CHECK-NOT: V_SET_INACTIVE
name: no_cfg
alignment: 1
exposesReturnsTwice: false
@@ -167,6 +197,28 @@ body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: no_cfg
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:sgpr_128 = COPY [[REG_SEQUENCE]]
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[COPY7]], 323, 12, 15, 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber %15:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_dpp]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %15, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -189,18 +241,32 @@ body: |
---
# Ensure that strict_wwm is not put around an EXEC copy
-#CHECK-LABEL: name: copy_exec
-#CHECK: %7:sreg_64 = COPY $exec
-#CHECK-NEXT: %13:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
-#CHECK-NEXT: %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-#CHECK-NEXT: $exec = EXIT_STRICT_WWM %13
-#CHECK-NEXT: %9:vgpr_32 = V_MBCNT_LO_U32_B32_e64 %7.sub0, 0, implicit $exec
name: copy_exec
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: copy_exec
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: dead [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY4]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_MBCNT_LO_U32_B32_e64_]], 312, 15, 15, 0, implicit $exec
+ ; CHECK-NEXT: dead [[V_READLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READLANE_B32 [[V_MOV_B32_dpp]], 63
+ ; CHECK-NEXT: early-clobber %12:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_e32_]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %12, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -224,20 +290,48 @@ body: |
---
# Check exit of WQM is still inserted correctly when SCC is live until block end.
# Critically this tests that compilation does not fail.
-#CHECK-LABEL: name: scc_always_live
-#CHECK: %8:vreg_128 = IMAGE_SAMPLE_V4_V2 %7
-#CHECK-NEXT: S_CMP_EQ_U32 %2, 0, implicit-def $scc
-#CHECK-NEXT: undef %9.sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %9.sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32
-#CHECK-NEXT: %14:sreg_32_xm0 = COPY $scc
-#CHECK-NEXT: $exec = S_AND_B64 $exec, %13, implicit-def $scc
-#CHECK-NEXT: $scc = COPY %14
-#CHECK-NEXT: %10:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %11:vreg_128 = IMAGE_SAMPLE_V4_V2
-#CHECK-NEXT: S_CBRANCH_SCC0 %bb.2
name: scc_always_live
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: scc_always_live
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $m0 = COPY $sgpr1
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY1]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_1:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY2]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub0:vreg_64 = COPY [[V_INTERP_P1_F32_]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]].sub1:vreg_64 = COPY [[V_INTERP_P1_F32_1]]
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY4]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CMP_EQ_U32 [[COPY3]], 0, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_ADD_F32_e64_:%[0-9]+]].sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[IMAGE_SAMPLE_V4_V2_]].sub0, 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]].sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 [[V_INTERP_P1_F32_]], [[V_INTERP_P1_F32_1]], implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: $scc = COPY [[COPY5]]
+ ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[V_INTERP_P1_F32_]], 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_1:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_ADD_F32_e64_]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[V_ADD_F32_e64_1]], [[DEF1]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: $vgpr2 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub0
+ ; CHECK-NEXT: $vgpr3 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1, $vgpr2, $vgpr3
bb.0:
liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
@@ -281,18 +375,26 @@ body: |
---
# Check that unnecessary instructions do not get marked for WWM
#
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: BUFFER_LOAD_DWORDX2
-#CHECK: ENTER_STRICT_WWM
-#CHECK: V_SET_INACTIVE_B32
-#CHECK: V_SET_INACTIVE_B32
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: V_MAX
name: test_wwm_set_inactive_propagation
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-LABEL: name: test_wwm_set_inactive_propagation
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64_xexec = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub0:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub1:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[V_MAX_F64_e64_:%[0-9]+]]:vreg_64 = nnan nsz arcp contract reassoc nofpexcept V_MAX_F64_e64 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr1 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub1, implicit $exec
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:vgpr_32 = COPY $vgpr0
%2:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN %1:vgpr_32, %0:sgpr_128, 0, 0, 0, 0, implicit $exec
@@ -308,15 +410,46 @@ body: |
---
# Check that WQM marking occurs correctly through phi nodes in the live range graph.
# If not, then the initial V_MOV will not be in WQM.
-#
-#CHECK-LABEL: name: test_wqm_lr_phi
-#CHECK: COPY $exec
-#CHECK-NEXT: S_WQM
-#CHECK-NEXT: V_MOV_B32_e32 -10
-#CHECK-NEXT: V_MOV_B32_e32 0
name: test_wqm_lr_phi
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: test_wqm_lr_phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[S_GETPC_B64_:%[0-9]+]]:sreg_64 = S_GETPC_B64
+ ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[S_GETPC_B64_]], 32, 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vcc = V_CMP_LT_U32_e64 4, 4, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX8_IMM]], [[DEF]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7)
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
bb.0:
undef %0.sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
%0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
@@ -345,14 +478,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_cs
-#CHECK-NOT: S_WQM
name: no_wqm_in_cs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_cs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -362,14 +501,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_es
-#CHECK-NOT: S_WQM
name: no_wqm_in_es
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_es
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -379,14 +524,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_gs
-#CHECK-NOT: S_WQM
name: no_wqm_in_gs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_gs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -396,14 +547,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_hs
-#CHECK-NOT: S_WQM
name: no_wqm_in_hs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_hs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -413,14 +570,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_ls
-#CHECK-NOT: S_WQM
name: no_wqm_in_ls
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_ls
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -430,14 +593,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_vs
-#CHECK-NOT: S_WQM
name: no_wqm_in_vs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_vs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF