Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll | 1524
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/wqm.mir | 277
-rw-r--r-- | llvm/test/CodeGen/ARM/scmp.ll | 48
-rw-r--r-- | llvm/test/CodeGen/ARM/ucmp.ll | 36
-rw-r--r-- | llvm/test/CodeGen/Thumb/scmp.ll | 489
-rw-r--r-- | llvm/test/CodeGen/Thumb/ucmp.ll | 445
6 files changed, 2413 insertions, 406 deletions
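
Both test files below carry machine-generated assertions (see the NOTE lines). As a reference sketch, assuming llc is on PATH and the commands are run from an LLVM source checkout, the checks can be regenerated with:

  $ llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
  $ llvm/utils/update_mir_test_checks.py llvm/test/CodeGen/AMDGPU/wqm.mir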
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll index 462090c..0a2e7af 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll @@ -1,12 +1,46 @@ -; RUN: llc -mtriple=amdgcn -mcpu=verde < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX12 %s -; GCN-LABEL: {{^}}gs_const: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @gs_const() { +; SI-LABEL: gs_const: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: gs_const: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: gs_const: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %tmp = icmp ule i32 0, 3 %tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00 %c1 = fcmp oge float %tmp1, 0.0 @@ -19,12 +53,81 @@ define amdgpu_gs void @gs_const() { ret void } -; GCN-LABEL: {{^}}vcc_implicit_def: -; GCN: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}} -; GCN: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}} -; GCN: s_and{{n2|_not1}}_b64 exec, exec, vcc -; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]] define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { +; SI-LABEL: vcc_implicit_def: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; SI-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB1_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; SI-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB1_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: vcc_implicit_def: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 
vcc, 0, v1 +; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB1_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: vcc_implicit_def: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: exp mrt1 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB1_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: vcc_implicit_def: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e64 s[0:1], 0, v1 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, vcc +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: export mrt1 v0, v0, v0, v0 done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB1_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %tmp0 = fcmp olt float %arg13, 0.000000e+00 %c1 = fcmp oge float %arg14, 0.0 call void @llvm.amdgcn.kill(i1 %c1) @@ -34,31 +137,102 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { ret void } -; GCN-LABEL: {{^}}true: -; GCN-NEXT: %bb. 
-; GCN-NEXT: s_endpgm define amdgpu_gs void @true() { +; GCN-LABEL: true: +; GCN: ; %bb.0: +; GCN-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 true) ret void } -; GCN-LABEL: {{^}}false: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @false() { +; SI-LABEL: false: +; SI: ; %bb.0: +; SI-NEXT: s_andn2_b64 exec, exec, exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: false: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_andn2_b64 exec, exec, exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: false: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_and_not1_b64 exec, exec, exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 false) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}and: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_or_b64 s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], exec, s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: and: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: and: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: and: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = or i1 %c1, %c2 @@ -67,13 +241,52 @@ define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}andn2: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_xor_b64 s[0:1] -; GCN: 
s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: andn2: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: andn2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: andn2: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = xor i1 %c1, %c2 @@ -83,135 +296,854 @@ define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}oeq: -; GCN: v_cmp_neq_f32 +; Should use v_cmp_neq_f32 define amdgpu_gs void @oeq(float %a) { +; SI-LABEL: oeq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oeq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oeq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oeq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oeq float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ogt: -; GCN: v_cmp_nlt_f32 +; Should use v_cmp_nlt_f32 define amdgpu_gs void @ogt(float %a) { +; SI-LABEL: ogt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ogt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ogt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ogt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ogt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}oge: -; GCN: v_cmp_nle_f32 +; Should use v_cmp_nle_f32 define amdgpu_gs void @oge(float %a) { +; SI-LABEL: oge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oge float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}olt: -; GCN: v_cmp_ngt_f32 +; Should use v_cmp_ngt_f32 define amdgpu_gs void @olt(float %a) { +; SI-LABEL: olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ole: -; GCN: v_cmp_nge_f32 +; Should use v_cmp_nge_f32 define amdgpu_gs void @ole(float %a) { +; SI-LABEL: ole: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ole: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ole: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ole: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ole float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}one: -; GCN: v_cmp_nlg_f32 +; Should use v_cmp_nlg_f32 define amdgpu_gs void @one(float %a) { +; SI-LABEL: one: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: one: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: one: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: one: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp one float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ord: -; GCN: v_cmp_o_f32 +; Should use v_cmp_o_f32 define amdgpu_gs void @ord(float %a) { +; SI-LABEL: ord: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ord: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: ord: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp ord float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uno: -; GCN: v_cmp_u_f32 +; Should use v_cmp_u_f32 define 
amdgpu_gs void @uno(float %a) { +; SI-LABEL: uno: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uno: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: uno: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp uno float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ueq: -; GCN: v_cmp_lg_f32 +; Should use v_cmp_lg_f32 define amdgpu_gs void @ueq(float %a) { +; SI-LABEL: ueq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ueq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ueq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ueq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ueq float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ugt: -; GCN: v_cmp_ge_f32 +; Should use v_cmp_ge_f32 define amdgpu_gs void @ugt(float %a) { +; SI-LABEL: ugt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: 
s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ugt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ugt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ugt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ugt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uge: -; GCN: v_cmp_gt_f32_e32 vcc, -1.0 +; Should use v_cmp_gt_f32_e32 vcc, -1.0 define amdgpu_gs void @uge(float %a) { +; SI-LABEL: uge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: uge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: uge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, -1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp uge float %a, -1.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ult: -; GCN: v_cmp_le_f32_e32 vcc, -2.0 +; Should use v_cmp_le_f32_e32 vcc, -2.0 define amdgpu_gs void @ult(float %a) { +; SI-LABEL: ult: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_le_f32_e32 vcc, 
-2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ult: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ult: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ult: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, -2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ult float %a, -2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ule: -; GCN: v_cmp_lt_f32_e32 vcc, 2.0 +; Should use v_cmp_lt_f32_e32 vcc, 2.0 define amdgpu_gs void @ule(float %a) { +; SI-LABEL: ule: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ule: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ule: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ule: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc, 2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ule float %a, 2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}une: -; GCN: v_cmp_eq_f32_e32 vcc, 0 +; Should use v_cmp_eq_f32_e32 vcc, 0 define amdgpu_gs void @une(float %a) { +; 
SI-LABEL: une: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: une: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: une: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: une: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp une float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}neg_olt: -; GCN: v_cmp_gt_f32_e32 vcc, 1.0 +; Should use v_cmp_gt_f32_e32 vcc, 1.0 define amdgpu_gs void @neg_olt(float %a) { +; SI-LABEL: neg_olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: neg_olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: neg_olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: neg_olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 1.0 %c2 = xor i1 %c1, 1 call void @llvm.amdgcn.kill(i1 %c2) @@ -219,13 +1151,61 @@ define amdgpu_gs void @neg_olt(float %a) { ret void } -; GCN-LABEL: {{^}}fcmp_x2: 
; FIXME: LLVM should be able to combine these fcmp opcodes. -; SI: v_cmp_lt_f32_e32 vcc, s{{[0-9]+}}, v0 -; GFX10: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 -; GCN: v_cndmask_b32 -; GCN: v_cmp_nle_f32 define amdgpu_ps void @fcmp_x2(float %a) #0 { +; SI-LABEL: fcmp_x2: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b32 s0, 0x3e800000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB21_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB21_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: fcmp_x2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB21_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: fcmp_x2: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB21_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_x2: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB21_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %ogt = fcmp nsz ogt float %a, 2.500000e-01 %k = select i1 %ogt, float -1.000000e+00, float 0.000000e+00 %c = fcmp nsz oge float %k, 0.000000e+00 @@ -234,14 +1214,78 @@ define amdgpu_ps void @fcmp_x2(float %a) #0 { } ; Note: an almost identical test for this exists in llvm.amdgcn.wqm.vote.ll -; GCN-LABEL: {{^}}wqm: -; GCN: v_cmp_neq_f32_e32 vcc, 0 -; GCN-DAG: s_wqm_b64 s[2:3], vcc -; GCN-DAG: s_mov_b64 s[0:1], exec -; GCN: s_and{{n2|_not1}}_b64 s[2:3], exec, s[2:3] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], s[0:1], s[2:3] -; GCN: s_and_b64 exec, exec, s[0:1] define amdgpu_ps float @wqm(float %a) { +; SI-LABEL: wqm: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_wqm_b64 s[2:3], vcc +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_cbranch_scc0 .LBB22_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 0 +; SI-NEXT: s_branch .LBB22_3 +; SI-NEXT: .LBB22_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB22_3: +; +; GFX10-LABEL: wqm: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_wqm_b64 s[2:3], vcc +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, 0 
+; GFX10-NEXT: s_branch .LBB22_3 +; GFX10-NEXT: .LBB22_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB22_3: +; +; GFX11-LABEL: wqm: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_mov_b64 s[0:1], exec +; GFX11-NEXT: s_wqm_b64 s[2:3], vcc +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_branch .LBB22_3 +; GFX11-NEXT: .LBB22_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB22_3: +; +; GFX12-LABEL: wqm: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_wqm_b64 s[2:3], vcc +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: v_mov_b32_e32 v0, 0 +; GFX12-NEXT: s_branch .LBB22_3 +; GFX12-NEXT: .LBB22_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB22_3: %c1 = fcmp une float %a, 0.0 %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1) call void @llvm.amdgcn.kill(i1 %c2) @@ -249,28 +1293,212 @@ define amdgpu_ps float @wqm(float %a) { } ; This checks that we use the 64-bit encoding when the operand is a SGPR. -; GCN-LABEL: {{^}}test_sgpr: -; GCN: v_cmp_nle_f32_e64 define amdgpu_ps void @test_sgpr(float inreg %a) #0 { +; SI-LABEL: test_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB23_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB23_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB23_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB23_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 1.0 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB23_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.000000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_non_inline_imm_sgpr: -; GCN-NOT: v_cmp_le_f32_e64 define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 { +; SI-LABEL: test_non_inline_imm_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_le_f32_e32 vcc, s0, v0 +; SI-NEXT: s_andn2_b64 s[0:1], exec, vcc +; 
SI-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; SI-NEXT: s_cbranch_scc0 .LBB24_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB24_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_non_inline_imm_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; GFX10-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB24_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_non_inline_imm_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX11-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX11-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB24_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_non_inline_imm_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB24_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.500000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_scc_liveness: -; GCN: s_cmp -; GCN: s_and_b64 exec -; GCN: s_cmp -; GCN: s_cbranch_scc define amdgpu_ps void @test_scc_liveness() #0 { +; SI-LABEL: test_scc_liveness: +; SI: ; %bb.0: ; %main_body +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: .LBB25_1: ; %loop3 +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_cmp_gt_i32 s2, 0 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; SI-NEXT: s_cbranch_scc0 .LBB25_4 +; SI-NEXT: ; %bb.2: ; %loop3 +; SI-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_add_i32 s3, s2, 1 +; SI-NEXT: s_cmp_lt_i32 s2, 1 +; SI-NEXT: s_mov_b32 s2, s3 +; SI-NEXT: s_cbranch_scc1 .LBB25_1 +; SI-NEXT: ; %bb.3: ; %endloop15 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB25_4: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_scc_liveness: +; GFX10: ; %bb.0: ; %main_body +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_mov_b32 s2, 0 +; GFX10-NEXT: .LBB25_1: ; %loop3 +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_cmp_gt_i32 s2, 0 +; GFX10-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX10-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; GFX10-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX10-NEXT: ; %bb.2: ; %loop3 +; GFX10-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_add_i32 s3, s2, 1 +; GFX10-NEXT: s_cmp_lt_i32 s2, 1 +; GFX10-NEXT: s_mov_b32 s2, s3 +; GFX10-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX10-NEXT: ; %bb.3: ; %endloop15 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB25_4: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_scc_liveness: +; GFX11: ; %bb.0: ; %main_body +; GFX11-NEXT: s_mov_b64 
s[0:1], exec +; GFX11-NEXT: s_mov_b32 s2, 0 +; GFX11-NEXT: .LBB25_1: ; %loop3 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_cmp_gt_i32 s2, 0 +; GFX11-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX11-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX11-NEXT: ; %bb.2: ; %loop3 +; GFX11-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_add_i32 s3, s2, 1 +; GFX11-NEXT: s_cmp_lt_i32 s2, 1 +; GFX11-NEXT: s_mov_b32 s2, s3 +; GFX11-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX11-NEXT: ; %bb.3: ; %endloop15 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB25_4: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_scc_liveness: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_mov_b32 s2, 0 +; GFX12-NEXT: .LBB25_1: ; %loop3 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_cmp_gt_i32 s2, 0 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX12-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX12-NEXT: ; %bb.2: ; %loop3 +; GFX12-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_add_co_i32 s3, s2, 1 +; GFX12-NEXT: s_cmp_lt_i32 s2, 1 +; GFX12-NEXT: s_mov_b32 s2, s3 +; GFX12-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX12-NEXT: ; %bb.3: ; %endloop15 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB25_4: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm main_body: br label %loop3 @@ -287,11 +1515,139 @@ endloop15: ; preds = %loop3 ; Check this compiles. ; If kill is marked as defining VCC then this will fail with live interval issues. 
-; GCN-LABEL: {{^}}kill_with_loop_exit: -; GCN: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec -; GCN: s_and{{n2|_not1}}_b64 [[LIVE]], [[LIVE]], exec -; GCN-NEXT: s_cbranch_scc0 define amdgpu_ps void @kill_with_loop_exit(float inreg %inp0, float inreg %inp1, <4 x i32> inreg %inp2, float inreg %inp3) { +; SI-LABEL: kill_with_loop_exit: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_mov_b32_e32 v0, 0x43000000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s1, v0 +; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 1.0 +; SI-NEXT: s_cbranch_vccnz .LBB26_5 +; SI-NEXT: ; %bb.1: ; %.preheader1.preheader +; SI-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; SI-NEXT: .LBB26_2: ; %bb +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; SI-NEXT: s_cbranch_vccnz .LBB26_2 +; SI-NEXT: ; %bb.3: ; %bb33 +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; SI-NEXT: s_cbranch_scc0 .LBB26_6 +; SI-NEXT: ; %bb.4: ; %bb33 +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: .LBB26_5: ; %bb35 +; SI-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB26_6: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: kill_with_loop_exit: +; GFX10: ; %bb.0: ; %.entry +; GFX10-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX10-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX10-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX10-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX10-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX10-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX10-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX10-NEXT: .LBB26_2: ; %bb +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX10-NEXT: ; %bb.3: ; %bb33 +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; GFX10-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX10-NEXT: ; %bb.4: ; %bb33 +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: .LBB26_5: ; %bb35 +; GFX10-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB26_6: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: kill_with_loop_exit: +; GFX11: ; %bb.0: ; %.entry +; GFX11-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX11-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX11-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX11-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX11-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX11-NEXT: s_mov_b64 s[2:3], exec +; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX11-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX11-NEXT: .LBB26_2: ; %bb +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: 
s_cbranch_vccnz .LBB26_2 +; GFX11-NEXT: ; %bb.3: ; %bb33 +; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX11-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX11-NEXT: ; %bb.4: ; %bb33 +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: .LBB26_5: ; %bb35 +; GFX11-NEXT: exp mrt0 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB26_6: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: kill_with_loop_exit: +; GFX12: ; %bb.0: ; %.entry +; GFX12-NEXT: s_cmp_lt_f32 s0, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_cmp_lt_f32 s1, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX12-NEXT: s_mov_b32 s4, 1.0 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX12-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX12-NEXT: s_cmp_ngt_f32 s6, 0 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: s_mov_b32 s4, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] +; GFX12-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 +; GFX12-NEXT: .LBB26_2: ; %bb +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_add_f32 s4, s4, 0x3e800000 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX12-NEXT: ; %bb.3: ; %bb33 +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX12-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX12-NEXT: ; %bb.4: ; %bb33 +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: .LBB26_5: ; %bb35 +; GFX12-NEXT: v_mov_b32_e32 v0, s4 +; GFX12-NEXT: export mrt0 v0, v0, v0, v0 done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB26_6: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm .entry: %tmp24 = fcmp olt float %inp0, 1.280000e+02 %tmp25 = fcmp olt float %inp1, 1.280000e+02 diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir index 350b233..ceb1b3e 100644 --- a/llvm/test/CodeGen/AMDGPU/wqm.mir +++ b/llvm/test/CodeGen/AMDGPU/wqm.mir @@ -1,3 +1,4 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-wqm -o - %s | FileCheck %s # RUN: llc -mtriple=amdgcn -mcpu=fiji -passes=si-wqm -o - %s | FileCheck %s @@ -46,10 +47,6 @@ --- # Check for awareness that s_or_saveexec_b64 clobbers SCC -# -#CHECK: ENTER_STRICT_WWM -#CHECK: S_CMP_LT_I32 -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc alignment: 1 exposesReturnsTwice: false @@ -80,6 +77,21 @@ body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-LABEL: name: test_strict_wwm_scc + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], 
[[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 %3 = COPY $vgpr0 %2 = COPY $sgpr2 %1 = COPY $sgpr1 @@ -96,16 +108,35 @@ body: | --- # Second test for awareness that s_or_saveexec_b64 clobbers SCC # Because entry block is treated differently. -# -#CHECK: %bb.1 -#CHECK: S_CMP_LT_I32 -#CHECK: COPY $scc -#CHECK: ENTER_STRICT_WWM -#CHECK: $scc = COPY -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc2 tracksRegLiveness: true body: | + ; CHECK-LABEL: name: test_strict_wwm_scc2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY]], [[DEF]], 0, 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; CHECK-NEXT: [[ENTER_STRICT_WWM1:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: $scc = COPY [[COPY4]] + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM1]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: $vgpr1 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 @@ -130,7 +161,6 @@ body: | --- # V_SET_INACTIVE, when its second operand is undef, is replaced by a # COPY by si-wqm. Ensure the instruction is removed. 
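# With an undef inactive-lane operand there is no value to blend across
# lanes, so si-wqm can lower the V_SET_INACTIVE to a plain COPY, which is
# then folded away; the autogenerated body checks below contain no
# V_SET_INACTIVE as a result.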
-#CHECK-NOT: V_SET_INACTIVE name: no_cfg alignment: 1 exposesReturnsTwice: false @@ -167,6 +197,28 @@ body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-LABEL: name: no_cfg + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:sgpr_128 = COPY [[REG_SEQUENCE]] + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1 + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF + ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[COPY7]], 323, 12, 15, 0, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber %15:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_dpp]], implicit $exec + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %15, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 %3:sgpr_32 = COPY $sgpr3 %2:sgpr_32 = COPY $sgpr2 %1:sgpr_32 = COPY $sgpr1 @@ -189,18 +241,32 @@ body: | --- # Ensure that strict_wwm is not put around an EXEC copy -#CHECK-LABEL: name: copy_exec -#CHECK: %7:sreg_64 = COPY $exec -#CHECK-NEXT: %13:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec -#CHECK-NEXT: %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec -#CHECK-NEXT: $exec = EXIT_STRICT_WWM %13 -#CHECK-NEXT: %9:vgpr_32 = V_MBCNT_LO_U32_B32_e64 %7.sub0, 0, implicit $exec name: copy_exec tracksRegLiveness: true body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-LABEL: name: copy_exec + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: dead [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = 
V_MBCNT_LO_U32_B32_e64 [[COPY4]].sub0, 0, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_MBCNT_LO_U32_B32_e64_]], 312, 15, 15, 0, implicit $exec + ; CHECK-NEXT: dead [[V_READLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READLANE_B32 [[V_MOV_B32_dpp]], 63 + ; CHECK-NEXT: early-clobber %12:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_e32_]], implicit $exec + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %12, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 %3:sgpr_32 = COPY $sgpr3 %2:sgpr_32 = COPY $sgpr2 %1:sgpr_32 = COPY $sgpr1 @@ -224,20 +290,48 @@ body: | --- # Check exit of WQM is still inserted correctly when SCC is live until block end. # Critially this tests that compilation does not fail. -#CHECK-LABEL: name: scc_always_live -#CHECK: %8:vreg_128 = IMAGE_SAMPLE_V4_V2 %7 -#CHECK-NEXT: S_CMP_EQ_U32 %2, 0, implicit-def $scc -#CHECK-NEXT: undef %9.sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 -#CHECK-NEXT: %9.sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 -#CHECK-NEXT: %14:sreg_32_xm0 = COPY $scc -#CHECK-NEXT: $exec = S_AND_B64 $exec, %13, implicit-def $scc -#CHECK-NEXT: $scc = COPY %14 -#CHECK-NEXT: %10:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 -#CHECK-NEXT: %11:vreg_128 = IMAGE_SAMPLE_V4_V2 -#CHECK-NEXT: S_CBRANCH_SCC0 %bb.2 name: scc_always_live tracksRegLiveness: true body: | + ; CHECK-LABEL: name: scc_always_live + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: $m0 = COPY $sgpr1 + ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: [[V_INTERP_P1_F32_:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY1]], 3, 2, implicit $mode, implicit $m0, implicit $exec + ; CHECK-NEXT: [[V_INTERP_P1_F32_1:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY2]], 3, 2, implicit $mode, implicit $m0, implicit $exec + ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub0:vreg_64 = COPY [[V_INTERP_P1_F32_]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]].sub1:vreg_64 = COPY [[V_INTERP_P1_F32_1]] + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY4]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) + ; CHECK-NEXT: S_CMP_EQ_U32 [[COPY3]], 0, implicit-def $scc + ; CHECK-NEXT: undef [[V_ADD_F32_e64_:%[0-9]+]].sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[IMAGE_SAMPLE_V4_V2_]].sub0, 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]].sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 [[V_INTERP_P1_F32_]], [[V_INTERP_P1_F32_1]], implicit $mode, implicit $exec + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc + ; CHECK-NEXT: $scc = COPY [[COPY5]] + ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[V_INTERP_P1_F32_]], 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_1:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_ADD_F32_e64_]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, 
implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) + ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[V_ADD_F32_e64_1]], [[DEF1]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0 + ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1 + ; CHECK-NEXT: $vgpr2 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub0 + ; CHECK-NEXT: $vgpr3 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub1 + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1, $vgpr2, $vgpr3 bb.0: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2 @@ -281,18 +375,26 @@ body: | --- # Check that unnecessary instruction do not get marked for WWM # -#CHECK-NOT: ENTER_STRICT_WWM -#CHECK: BUFFER_LOAD_DWORDX2 -#CHECK: ENTER_STRICT_WWM -#CHECK: V_SET_INACTIVE_B32 -#CHECK: V_SET_INACTIVE_B32 -#CHECK-NOT: ENTER_STRICT_WWM -#CHECK: V_MAX name: test_wwm_set_inactive_propagation tracksRegLiveness: true body: | bb.0: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; CHECK-LABEL: name: test_wwm_set_inactive_propagation + ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64_xexec = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub0:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub1:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[V_MAX_F64_e64_:%[0-9]+]]:vreg_64 = nnan nsz arcp contract reassoc nofpexcept V_MAX_F64_e64 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub0, implicit $exec + ; CHECK-NEXT: early-clobber $vgpr1 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub1, implicit $exec + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3 %1:vgpr_32 = COPY $vgpr0 %2:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN %1:vgpr_32, %0:sgpr_128, 0, 0, 0, 0, implicit $exec @@ -308,15 +410,46 @@ body: | --- # Check that WQM marking occurs correctly through phi nodes in live range graph. # If not then initial V_MOV will not be in WQM. 
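# The %0.sub0/%0.sub1 values defined in bb.0 reach the IMAGE_SAMPLE in bb.4
# only through the loop-carried subregister updates in bb.2 and bb.3, so the
# sample's WQM requirement must propagate backwards through those defs for
# S_WQM_B64 to be inserted at function entry.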
-# -#CHECK-LABEL: name: test_wqm_lr_phi -#CHECK: COPY $exec -#CHECK-NEXT: S_WQM -#CHECK-NEXT: V_MOV_B32_e32 -10 -#CHECK-NEXT: V_MOV_B32_e32 0 name: test_wqm_lr_phi tracksRegLiveness: true body: | + ; CHECK-LABEL: name: test_wqm_lr_phi + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc + ; CHECK-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: [[S_GETPC_B64_:%[0-9]+]]:sreg_64 = S_GETPC_B64 + ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[S_GETPC_B64_]], 32, 0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $vcc = V_CMP_LT_U32_e64 4, 4, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: successors: %bb.3(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec + ; CHECK-NEXT: S_BRANCH %bb.3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec + ; CHECK-NEXT: S_BRANCH %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4: + ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX8_IMM]], [[DEF]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7) + ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0 + ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1 + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 bb.0: undef %0.sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec %0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec @@ -345,14 +478,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_cs -#CHECK-NOT: S_WQM name: no_wqm_in_cs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_cs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -362,14 +501,20 @@ body: | ... 
--- -#CHECK-LABEL: name: no_wqm_in_es -#CHECK-NOT: S_WQM name: no_wqm_in_es tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_es + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -379,14 +524,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_gs -#CHECK-NOT: S_WQM name: no_wqm_in_gs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_gs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -396,14 +547,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_hs -#CHECK-NOT: S_WQM name: no_wqm_in_hs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_hs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -413,14 +570,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_ls -#CHECK-NOT: S_WQM name: no_wqm_in_ls tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_ls + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -430,14 +593,20 @@ body: | ... 
--- -#CHECK-LABEL: name: no_wqm_in_vs -#CHECK-NOT: S_WQM name: no_wqm_in_vs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_vs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF diff --git a/llvm/test/CodeGen/ARM/scmp.ll b/llvm/test/CodeGen/ARM/scmp.ll index 6e493c9..9189aee 100644 --- a/llvm/test/CodeGen/ARM/scmp.ll +++ b/llvm/test/CodeGen/ARM/scmp.ll @@ -4,12 +4,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { ; CHECK-LABEL: scmp_8_8: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i8 %x, i8 %y) ret i8 %1 @@ -18,12 +15,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { ; CHECK-LABEL: scmp_8_16: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i16 %x, i16 %y) ret i8 %1 @@ -32,12 +26,9 @@ define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { define i8 @scmp_8_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: scmp_8_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i32 %x, i32 %y) ret i8 %1 @@ -92,17 +83,26 @@ define i8 @scmp_8_128(i128 %x, i128 %y) nounwind { define i32 @scmp_32_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: scmp_32_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i32 @llvm.scmp(i32 %x, i32 %y) ret i32 %1 } +define i32 @scmp_neg(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: scmp_neg: +; CHECK: @ %bb.0: +; CHECK-NEXT: adds r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 +; CHECK-NEXT: bx lr + %yy = sub nsw i32 0, %y + %1 = call i32 @llvm.scmp(i32 %x, i32 %yy) + ret i32 %1 +} + define i32 @scmp_32_64(i64 %x, i64 %y) nounwind { ; CHECK-LABEL: scmp_32_64: ; CHECK: @ %bb.0: diff --git a/llvm/test/CodeGen/ARM/ucmp.ll b/llvm/test/CodeGen/ARM/ucmp.ll index ad4af53..bb02014 100644 --- a/llvm/test/CodeGen/ARM/ucmp.ll +++ b/llvm/test/CodeGen/ARM/ucmp.ll @@ -4,12 +4,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { ; CHECK-LABEL: 
ucmp_8_8: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i8 %x, i8 %y) ret i8 %1 @@ -18,12 +15,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { ; CHECK-LABEL: ucmp_8_16: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i16 %x, i16 %y) ret i8 %1 @@ -32,12 +26,9 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: ucmp_8_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i32 %x, i32 %y) ret i8 %1 @@ -92,12 +83,9 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind { define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: ucmp_32_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i32 @llvm.ucmp(i32 %x, i32 %y) ret i32 %1 diff --git a/llvm/test/CodeGen/Thumb/scmp.ll b/llvm/test/CodeGen/Thumb/scmp.ll index 661dbe9..c002449 100644 --- a/llvm/test/CodeGen/Thumb/scmp.ll +++ b/llvm/test/CodeGen/Thumb/scmp.ll @@ -1,151 +1,420 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s +; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1 +; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2 +; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { -; CHECK-LABEL: scmp_8_8: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_8: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB0_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB0_4 +; THUMB1-NEXT: .LBB0_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB0_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB0_2 +; THUMB1-NEXT: .LBB0_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_8: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_8: 
+; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i8 %x, i8 %y) ret i8 %1 } define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { -; CHECK-LABEL: scmp_8_16: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_16: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB1_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB1_4 +; THUMB1-NEXT: .LBB1_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB1_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB1_2 +; THUMB1-NEXT: .LBB1_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_16: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_16: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i16 %x, i16 %y) ret i8 %1 } define i8 @scmp_8_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scmp_8_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB2_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB2_4 +; THUMB1-NEXT: .LBB2_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB2_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB2_2 +; THUMB1-NEXT: .LBB2_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i32 %x, i32 %y) ret i8 %1 } define i8 @scmp_8_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_8_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB3_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; 
THUMB1-NEXT: .LBB3_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB3_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB3_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_8_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i64 %x, i64 %y) ret i8 %1 } define i8 @scmp_8_128(i128 %x, i128 %y) nounwind { -; CHECK-LABEL: scmp_8_128: -; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: add.w lr, sp, #16 -; CHECK-NEXT: ldr r4, [sp, #28] -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: ldm.w lr, {r9, r12, lr} -; CHECK-NEXT: subs.w r6, r0, r9 -; CHECK-NEXT: sbcs.w r6, r1, r12 -; CHECK-NEXT: sbcs.w r6, r2, lr -; CHECK-NEXT: sbcs.w r6, r3, r4 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: subs.w r0, r9, r0 -; CHECK-NEXT: sbcs.w r0, r12, r1 -; CHECK-NEXT: sbcs.w r0, lr, r2 -; CHECK-NEXT: sbcs.w r0, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: subs r0, r5, r6 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; THUMB1-LABEL: scmp_8_128: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, r7, lr} +; THUMB1-NEXT: push {r4, r5, r6, r7, lr} +; THUMB1-NEXT: .pad #20 +; THUMB1-NEXT: sub sp, #20 +; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #1 +; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill +; THUMB1-NEXT: ldr r6, [sp, #52] +; THUMB1-NEXT: add r7, sp, #40 +; THUMB1-NEXT: ldm r7, {r3, r5, r7} +; THUMB1-NEXT: subs r4, r0, r3 +; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r1 +; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r5 +; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r2 +; THUMB1-NEXT: sbcs r4, r7 +; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r6 +; THUMB1-NEXT: mov r2, r1 +; THUMB1-NEXT: blt .LBB4_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_2: +; THUMB1-NEXT: subs r0, r3, r0 +; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; THUMB1-NEXT: sbcs r5, r0 +; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; THUMB1-NEXT: sbcs r7, r0 +; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r6, r0 +; THUMB1-NEXT: blt .LBB4_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_4: +; THUMB1-NEXT: subs r0, r1, r2 +; THUMB1-NEXT: add sp, #20 +; THUMB1-NEXT: pop {r4, r5, r6, r7, pc} +; +; THUMB2-LABEL: scmp_8_128: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: push {r4, r5, r6, lr} +; THUMB2-NEXT: add.w lr, sp, #16 +; THUMB2-NEXT: ldr r4, [sp, #28] +; THUMB2-NEXT: movs r5, #0 +; THUMB2-NEXT: ldm.w lr, {r9, r12, lr} +; THUMB2-NEXT: subs.w r6, r0, r9 +; THUMB2-NEXT: sbcs.w r6, r1, r12 +; THUMB2-NEXT: sbcs.w r6, r2, lr +; 
THUMB2-NEXT: sbcs.w r6, r3, r4 +; THUMB2-NEXT: mov.w r6, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt r6, #1 +; THUMB2-NEXT: subs.w r0, r9, r0 +; THUMB2-NEXT: sbcs.w r0, r12, r1 +; THUMB2-NEXT: sbcs.w r0, lr, r2 +; THUMB2-NEXT: sbcs.w r0, r4, r3 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt r5, #1 +; THUMB2-NEXT: subs r0, r5, r6 +; THUMB2-NEXT: pop {r4, r5, r6, pc} +; +; V81M-LABEL: scmp_8_128: +; V81M: @ %bb.0: +; V81M-NEXT: .save {r4, r5, r6, lr} +; V81M-NEXT: push {r4, r5, r6, lr} +; V81M-NEXT: ldrd r5, r4, [sp, #16] +; V81M-NEXT: ldrd lr, r12, [sp, #24] +; V81M-NEXT: subs r6, r0, r5 +; V81M-NEXT: sbcs.w r6, r1, r4 +; V81M-NEXT: sbcs.w r6, r2, lr +; V81M-NEXT: sbcs.w r6, r3, r12 +; V81M-NEXT: cset r6, lt +; V81M-NEXT: subs r0, r5, r0 +; V81M-NEXT: sbcs.w r0, r4, r1 +; V81M-NEXT: sbcs.w r0, lr, r2 +; V81M-NEXT: sbcs.w r0, r12, r3 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: subs r0, r0, r6 +; V81M-NEXT: pop {r4, r5, r6, pc} %1 = call i8 @llvm.scmp(i128 %x, i128 %y) ret i8 %1 } define i32 @scmp_32_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scmp_32_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_32_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB5_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB5_4 +; THUMB1-NEXT: .LBB5_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB5_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB5_2 +; THUMB1-NEXT: .LBB5_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_32_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_32_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i32 @llvm.scmp(i32 %x, i32 %y) ret i32 %1 } define i32 @scmp_32_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_32_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_32_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB6_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB6_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB6_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB6_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_32_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: 
movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_32_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i32 @llvm.scmp(i64 %x, i64 %y) ret i32 %1 } define i64 @scmp_64_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_64_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: asrs r1, r0, #31 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_64_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB7_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB7_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB7_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB7_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: asrs r1, r0, #31 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_64_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: asrs r1, r0, #31 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_64_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: asrs r1, r0, #31 +; V81M-NEXT: bx lr %1 = call i64 @llvm.scmp(i64 %x, i64 %y) ret i64 %1 } diff --git a/llvm/test/CodeGen/Thumb/ucmp.ll b/llvm/test/CodeGen/Thumb/ucmp.ll index 7e6d0a3..5d0f57e 100644 --- a/llvm/test/CodeGen/Thumb/ucmp.ll +++ b/llvm/test/CodeGen/Thumb/ucmp.ll @@ -1,151 +1,376 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s +; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1 +; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2 +; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { -; CHECK-LABEL: ucmp_8_8: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_8: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; 
THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_8: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_8: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i8 %x, i8 %y) ret i8 %1 } define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { -; CHECK-LABEL: ucmp_8_16: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_16: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_16: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_16: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i16 %x, i16 %y) ret i8 %1 } define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: ucmp_8_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i32 %x, i32 %y) ret i8 %1 } define i8 @ucmp_8_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: ucmp_8_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blo .LBB3_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB3_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blo .LBB3_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB3_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop 
{r4, r5, r6, pc} +; +; THUMB2-LABEL: ucmp_8_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lo +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i64 %x, i64 %y) ret i8 %1 } define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind { -; CHECK-LABEL: ucmp_8_128: -; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: add.w lr, sp, #16 -; CHECK-NEXT: ldr r4, [sp, #28] -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: ldm.w lr, {r9, r12, lr} -; CHECK-NEXT: subs.w r6, r0, r9 -; CHECK-NEXT: sbcs.w r6, r1, r12 -; CHECK-NEXT: sbcs.w r6, r2, lr -; CHECK-NEXT: sbcs.w r6, r3, r4 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: subs.w r0, r9, r0 -; CHECK-NEXT: sbcs.w r0, r12, r1 -; CHECK-NEXT: sbcs.w r0, lr, r2 -; CHECK-NEXT: sbcs.w r0, r4, r3 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: subs r0, r5, r6 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; THUMB1-LABEL: ucmp_8_128: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, r7, lr} +; THUMB1-NEXT: push {r4, r5, r6, r7, lr} +; THUMB1-NEXT: .pad #20 +; THUMB1-NEXT: sub sp, #20 +; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #1 +; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill +; THUMB1-NEXT: ldr r6, [sp, #52] +; THUMB1-NEXT: add r7, sp, #40 +; THUMB1-NEXT: ldm r7, {r3, r5, r7} +; THUMB1-NEXT: subs r4, r0, r3 +; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r1 +; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r5 +; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r2 +; THUMB1-NEXT: sbcs r4, r7 +; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r6 +; THUMB1-NEXT: mov r2, r1 +; THUMB1-NEXT: blo .LBB4_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_2: +; THUMB1-NEXT: subs r0, r3, r0 +; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; THUMB1-NEXT: sbcs r5, r0 +; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; THUMB1-NEXT: sbcs r7, r0 +; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r6, r0 +; THUMB1-NEXT: blo .LBB4_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_4: +; THUMB1-NEXT: subs r0, r1, r2 +; THUMB1-NEXT: add sp, #20 +; THUMB1-NEXT: pop {r4, r5, r6, r7, pc} +; +; THUMB2-LABEL: ucmp_8_128: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: push {r4, r5, r6, lr} +; THUMB2-NEXT: add.w lr, sp, #16 +; THUMB2-NEXT: ldr r4, [sp, #28] +; THUMB2-NEXT: movs r5, #0 +; THUMB2-NEXT: ldm.w lr, {r9, r12, lr} +; THUMB2-NEXT: subs.w r6, r0, r9 +; THUMB2-NEXT: sbcs.w r6, r1, r12 +; THUMB2-NEXT: sbcs.w r6, r2, lr +; THUMB2-NEXT: sbcs.w r6, r3, r4 +; THUMB2-NEXT: mov.w r6, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo r6, #1 +; THUMB2-NEXT: subs.w r0, r9, r0 +; THUMB2-NEXT: sbcs.w r0, r12, r1 +; THUMB2-NEXT: sbcs.w r0, lr, r2 +; THUMB2-NEXT: sbcs.w r0, 
r4, r3 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo r5, #1 +; THUMB2-NEXT: subs r0, r5, r6 +; THUMB2-NEXT: pop {r4, r5, r6, pc} +; +; V81M-LABEL: ucmp_8_128: +; V81M: @ %bb.0: +; V81M-NEXT: .save {r4, r5, r6, lr} +; V81M-NEXT: push {r4, r5, r6, lr} +; V81M-NEXT: ldrd r5, r4, [sp, #16] +; V81M-NEXT: ldrd lr, r12, [sp, #24] +; V81M-NEXT: subs r6, r0, r5 +; V81M-NEXT: sbcs.w r6, r1, r4 +; V81M-NEXT: sbcs.w r6, r2, lr +; V81M-NEXT: sbcs.w r6, r3, r12 +; V81M-NEXT: cset r6, lo +; V81M-NEXT: subs r0, r5, r0 +; V81M-NEXT: sbcs.w r0, r4, r1 +; V81M-NEXT: sbcs.w r0, lr, r2 +; V81M-NEXT: sbcs.w r0, r12, r3 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: subs r0, r0, r6 +; V81M-NEXT: pop {r4, r5, r6, pc} %1 = call i8 @llvm.ucmp(i128 %x, i128 %y) ret i8 %1 } define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: ucmp_32_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_32_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_32_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_32_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i32 @llvm.ucmp(i32 %x, i32 %y) ret i32 %1 } define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: ucmp_32_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_32_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blo .LBB6_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB6_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blo .LBB6_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB6_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: ucmp_32_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_32_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lo +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i32 @llvm.ucmp(i64 %x, i64 %y) ret i32 %1 } 
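; The i64-result variant below exercises the same expansion. Conceptually
; (a sketch, not text from this test) ucmp reduces to
;   %lt = icmp ult i64 %x, %y
;   %gt = icmp ugt i64 %x, %y
;   %ltx = zext i1 %lt to i64
;   %gtx = zext i1 %gt to i64
;   %r = sub i64 %gtx, %ltx
; which is why each target sequence computes two borrow-out flags with
; subs/sbcs pairs and subtracts them; the trailing asrs materializes the
; sign-extended high word of the i64 result.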
define i64 @ucmp_64_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_64_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: asrs r1, r0, #31
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_64_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB7_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB7_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB7_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB7_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: asrs r1, r0, #31
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_64_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: asrs r1, r0, #31
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_64_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: asrs r1, r0, #31
+; V81M-NEXT: bx lr
 %1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
 ret i64 %1
}
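Among these regenerated checks, the one genuinely new test body in this diff appears to be scmp_neg in llvm/test/CodeGen/ARM/scmp.ll, which pins down folding an nsw negation into the flag-setting compare (adds r0, r0, r1 followed by movwgt/mvnlt, rather than a separate negate). A minimal companion sketch (function name hypothetical; its check lines would be regenerated with utils/update_llc_test_checks.py, as the autogenerated-assertions headers above note):

define i32 @scmp_neg_sketch(i32 %a, i32 %b) nounwind {
  ; sub nsw 0, %b cannot wrap, so the backend may fold the negation
  ; into the flag-setting add that feeds the scmp condition codes
  %nb = sub nsw i32 0, %b
  %r = call i32 @llvm.scmp(i32 %a, i32 %nb)
  ret i32 %r
}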