Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--   llvm/test/CodeGen/AMDGPU/limit-coalesce.mir       |  33
-rw-r--r--   llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll  |  92
2 files changed, 110 insertions, 15 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
index ca77482..fa52b96 100644
--- a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
+++ b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -1,19 +1,9 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
 # RUN: llc -mtriple=amdgcn -run-pass register-coalescer -o - %s | FileCheck %s
-# Check that coalescer does not create wider register tuple than in source
-
-# CHECK: - { id: 2, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 3, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 4, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 5, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 6, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 7, class: vreg_128, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 8, class: vreg_128, preferred-register: '', flags: [ ] }
+# Check that coalescer does not create wider register tuple than in
+# source.
 
 # No more registers shall be defined
-# CHECK-NEXT: liveins:
-# CHECK: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4,
-# CHECK: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6,
-
 ---
 name:            main
 alignment:       1
@@ -52,6 +42,23 @@ body:             |
   bb.0.entry:
     liveins: $sgpr0, $vgpr0_vgpr1
 
+    ; CHECK-LABEL: name: main
+    ; CHECK: liveins: $sgpr0, $vgpr0_vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+    ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $sgpr0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[DEF]].sub0
+    ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:vreg_64 = COPY [[COPY]].sub1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0
+    ; CHECK-NEXT: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, [[COPY1]], 0, 0, implicit $exec, implicit $flat_scr
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vreg_96 = IMPLICIT_DEF
+    ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:vreg_96 = COPY [[DEF1]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2:vreg_96 = COPY [[DEF]].sub0
+    ; CHECK-NEXT: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr
+    ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vreg_128 = IMPLICIT_DEF
+    ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1_sub2:vreg_128 = COPY [[DEF2]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub3:vreg_128 = COPY [[DEF]].sub0
+    ; CHECK-NEXT: FLAT_STORE_DWORDX4 $vgpr0_vgpr1, [[COPY3]], 0, 0, implicit $exec, implicit $flat_scr
     %3 = IMPLICIT_DEF
     undef %4.sub0 = COPY $sgpr0
     %4.sub1 = COPY %3.sub0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
index de7d234..b9bf76c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s
 
 declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
 declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
@@ -172,3 +172,91 @@ entry:
   %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
   ret i64 %qm
 }
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b32 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_32:
+; GFX11-GISEL:       ; %bb.0:
+; GFX11-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT:    s_and_b32 s0, s0, 1
+; GFX11-GISEL-NEXT:    s_quadmask_b32 s1, s1
+; GFX11-GISEL-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-GISEL-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
+; GFX11-GISEL-NEXT:    global_store_b32 v2, v3, s[2:3]
+; GFX11-GISEL-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX11-GISEL-NEXT:    s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_32:
+; GFX11-SDAG:       ; %bb.0:
+; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT:    s_and_b32 s0, s0, 1
+; GFX11-SDAG-NEXT:    s_quadmask_b32 s1, s1
+; GFX11-SDAG-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT:    s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s0
+; GFX11-SDAG-NEXT:    global_store_b32 v2, v3, s[2:3]
+; GFX11-SDAG-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX11-SDAG-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+  store i32 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b64 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_64:
+; GFX11-GISEL:       ; %bb.0:
+; GFX11-GISEL-NEXT:    s_clause 0x1
+; GFX11-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-GISEL-NEXT:    s_load_b32 s4, s[4:5], 0x24
+; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-GISEL-NEXT:    s_cmp_eq_u32 s4, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1
+; GFX11-GISEL-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v3, 0
+; GFX11-GISEL-NEXT:    global_store_b64 v4, v[0:1], s[2:3]
+; GFX11-GISEL-NEXT:    global_store_b32 v[2:3], v5, off
+; GFX11-GISEL-NEXT:    s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_64:
+; GFX11-SDAG:       ; %bb.0:
+; GFX11-SDAG-NEXT:    s_clause 0x1
+; GFX11-SDAG-NEXT:    s_load_b32 s6, s[4:5], 0x24
+; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v4, 0
+; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT:    s_and_b32 s4, s6, 1
+; GFX11-SDAG-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-SDAG-NEXT:    s_cmp_eq_u32 s4, 0
+; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-SDAG-NEXT:    s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s0
+; GFX11-SDAG-NEXT:    global_store_b64 v4, v[2:3], s[2:3]
+; GFX11-SDAG-NEXT:    global_store_b32 v[0:1], v5, off
+; GFX11-SDAG-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+  store i64 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}
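The pattern the two new quadmask tests pin down: s_quadmask_b32 and s_quadmask_b64 implicitly define SCC, so the SCC value produced by the s_and_b32 feeding the icmp is clobbered before the compare can consume it, and llc has to re-materialize it with s_cmp_eq_u32 after the quadmask, which is exactly what the generated check lines assert. A minimal standalone reproducer distilled from the patch is sketched below; the @scc_clobber name and the trimmed RUN line are illustrative assumptions, not part of the commit.

; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s
declare i32 @llvm.amdgcn.s.quadmask.i32(i32)

define amdgpu_kernel void @scc_clobber(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
  %and = and i32 %val0, 1                                ; s_and_b32 defines SCC
  %qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1)  ; s_quadmask_b32 clobbers SCC
  store i32 %qm, ptr addrspace(1) %ptr
  %cmp = icmp eq i32 %and, 0                             ; cannot reuse the and's SCC
  %sel = select i1 %cmp, i32 1, i32 0
  store i32 %sel, ptr addrspace(1) null, align 4
  ret void
}
; CHECK: s_quadmask_b32
; CHECK: s_cmp_eq_u32

Should the assertions drift, both test files carry NOTE headers naming the scripts that regenerate them: utils/update_mir_test_checks.py for limit-coalesce.mir and utils/update_llc_test_checks.py for llvm.amdgcn.quadmask.ll.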