Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/bf16-math.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/bf16-math.ll | 383
1 file changed, 383 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 029604c..1adf542 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -2,6 +2,385 @@
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GCN %s
 ; TODO: Add global-isel when it can support bf16
 
+define amdgpu_ps void @llvm_sqrt_bf16_v(ptr addrspace(1) %out, bfloat %src) {
+; GCN-LABEL: llvm_sqrt_bf16_v:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_sqrt_bf16_e32 v2, v2
+; GCN-NEXT: global_store_b16 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %sqrt = call bfloat @llvm.sqrt.bf16(bfloat %src)
+  store bfloat %sqrt, ptr addrspace(1) %out, align 2
+  ret void
+}
+
+define amdgpu_ps void @llvm_sqrt_bf16_s(ptr addrspace(1) %out, bfloat inreg %src) {
+; GCN-LABEL: llvm_sqrt_bf16_s:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_sqrt_bf16_e32 v2, s0
+; GCN-NEXT: global_store_b16 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %sqrt = call bfloat @llvm.sqrt.bf16(bfloat %src)
+  store bfloat %sqrt, ptr addrspace(1) %out, align 2
+  ret void
+}
+
+define amdgpu_ps void @v_test_add_v2bf16_vv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b) {
+; GCN-LABEL: v_test_add_v2bf16_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, v3
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fadd <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_add_v2bf16_vs(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_add_v2bf16_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, s0
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fadd <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_add_v2bf16_ss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_add_v2bf16_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, s0, s1
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fadd <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_add_v2bf16_vc(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_add_v2bf16_vc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, 2.0 op_sel_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fadd <2 x bfloat> %a, <bfloat 2.0, bfloat 2.0>
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_add_v2bf16_vl(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_add_v2bf16_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, 0x42c83f80, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fadd <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_vv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b) {
+; GCN-LABEL: v_test_sub_v2bf16_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_vs(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_sub_v2bf16_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, s0 neg_lo:[0,1] neg_hi:[0,1]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_ss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_sub_v2bf16_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, s0, s1 neg_lo:[0,1] neg_hi:[0,1]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> %a, %b
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_vc(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_sub_v2bf16_vc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, -2.0 op_sel_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> %a, <bfloat 2.0, bfloat 2.0>
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_vl(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_sub_v2bf16_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, 0xc2c8bf80, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_lv(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_sub_v2bf16_lv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, 0x42c83f80, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> <bfloat 1.0, bfloat 100.0>, %a
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_sub_v2bf16_iv(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_sub_v2bf16_iv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_bf16 v2, v2, 1.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %add = fsub <2 x bfloat> <bfloat 1.0, bfloat 1.0>, %a
+  store <2 x bfloat> %add, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_mul_v2bf16_vv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b) {
+; GCN-LABEL: v_test_mul_v2bf16_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_mul_bf16 v2, v2, v3
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %mul = fmul <2 x bfloat> %a, %b
+  store <2 x bfloat> %mul, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_mul_v2bf16_vs(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_mul_v2bf16_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %mul = fmul <2 x bfloat> %a, %b
+  store <2 x bfloat> %mul, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_mul_v2bf16_ss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_mul_v2bf16_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_mul_bf16 v2, s0, s1
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %mul = fmul <2 x bfloat> %a, %b
+  store <2 x bfloat> %mul, ptr addrspace(1) %out
+  ret void
+}
+
+; FIXME: We can do better folding inline constant instead of a literal.
+
+define amdgpu_ps void @v_test_mul_v2bf16_vc(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_mul_v2bf16_vc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_mul_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %mul = fmul <2 x bfloat> %a, <bfloat 0.5, bfloat 0.5>
+  store <2 x bfloat> %mul, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_mul_v2bf16_vl(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_mul_v2bf16_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_mul_bf16 v2, 0x42c83f80, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %mul = fmul <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
+  store <2 x bfloat> %mul, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_min_v2bf16_vv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b) {
+; GCN-LABEL: v_test_min_v2bf16_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_min_num_bf16 v2, v2, v3
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %min = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %min, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_min_v2bf16_vs(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_min_v2bf16_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_min_num_bf16 v2, v2, s0
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %min = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %min, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_min_v2bf16_ss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_min_v2bf16_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_min_num_bf16 v2, s0, s1
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %min = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %min, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_min_v2bf16_vc(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_min_v2bf16_vc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_min_num_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %min = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> <bfloat 0.5, bfloat 0.5>)
+  store <2 x bfloat> %min, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_min_v2bf16_vl(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_min_v2bf16_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_min_num_bf16 v2, 0x42c83f80, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %min = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> <bfloat 1.0, bfloat 100.0>)
+  store <2 x bfloat> %min, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_max_v2bf16_vv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b) {
+; GCN-LABEL: v_test_max_v2bf16_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_max_num_bf16 v2, v2, v3
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %max, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_max_v2bf16_vs(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_max_v2bf16_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_max_num_bf16 v2, v2, s0
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %max, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_max_v2bf16_ss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_max_v2bf16_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_max_num_bf16 v2, s0, s1
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+  store <2 x bfloat> %max, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_max_v2bf16_vc(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_max_v2bf16_vc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_max_num_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> <bfloat 0.5, bfloat 0.5>)
+  store <2 x bfloat> %max, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_max_v2bf16_vl(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_max_v2bf16_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_max_num_bf16 v2, 0x42c83f80, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> <bfloat 1.0, bfloat 100.0>)
+  store <2 x bfloat> %max, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_fma_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
+; GCN-LABEL: v_test_fma_v2bf16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %fma = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
+  store <2 x bfloat> %fma, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_fma_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
+; GCN-LABEL: v_test_fma_v2bf16_vss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %fma = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
+  store <2 x bfloat> %fma, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_fma_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
+; GCN-LABEL: v_test_fma_v2bf16_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %fma = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
+  store <2 x bfloat> %fma, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_fma_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
+; GCN-LABEL: v_test_fma_v2bf16_vsc:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0]
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %fma = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> <bfloat 0.5, bfloat 0.5>)
+  store <2 x bfloat> %fma, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_test_fma_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) {
+; GCN-LABEL: v_test_fma_v2bf16_vll:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b32 s0, 0x42c83f80
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0x43484000
+; GCN-NEXT: global_store_b32 v[0:1], v2, off
+; GCN-NEXT: s_endpgm
+  %fma = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> <bfloat 1.0, bfloat 100.0>, <2 x bfloat> <bfloat 2.0, bfloat 200.0>)
+  store <2 x bfloat> %fma, ptr addrspace(1) %out
+  ret void
+}
 define amdgpu_ps void @llvm_log2_bf16_v(ptr addrspace(1) %out, bfloat %src) {
 ; GCN-LABEL: llvm_log2_bf16_v:
 ; GCN: ; %bb.0:
@@ -47,5 +426,9 @@ define amdgpu_ps void @llvm_exp2_bf16_s(ptr addrspace(1) %out, bfloat inreg %src
   ret void
 }
 
+declare <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
+declare <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat>, <2 x bfloat>, <2 x bfloat>)
+declare bfloat @llvm.sqrt.bf16(bfloat)
 declare bfloat @llvm.log2.bf16(bfloat)
 declare bfloat @llvm.exp2.bf16(bfloat)
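
The 32-bit literals in the checks above (0x42c83f80, 0xc2c8bf80, 0x43484000) are two bfloat16 values packed into one dword, element 0 in the low half-word and element 1 in the high half-word; splat constants such as 2.0 or 0.5 instead use a VOP3P inline constant with op_sel_hi:[1,0], which is presumably what the FIXME about folding an inline constant is getting at. A minimal sketch of that packing, in plain Python and not part of the test itself, for illustration only:

import struct

def bf16_bits(x: float) -> int:
    # Take the top 16 bits of the IEEE-754 float32 encoding; this truncation is
    # exact for the values used in this test (0.5, 1.0, 2.0, 100.0, 200.0, and negations).
    return struct.unpack("<I", struct.pack("<f", x))[0] >> 16

def pack_v2bf16(lo: float, hi: float) -> int:
    # <2 x bfloat> as a 32-bit literal: element 0 in bits [15:0], element 1 in bits [31:16].
    return (bf16_bits(hi) << 16) | bf16_bits(lo)

assert pack_v2bf16(1.0, 100.0) == 0x42C83F80    # v_pk_add_bf16 v2, 0x42c83f80, v2
assert pack_v2bf16(-1.0, -100.0) == 0xC2C8BF80  # folded fsub: v_pk_add_bf16 v2, 0xc2c8bf80, v2
assert pack_v2bf16(2.0, 200.0) == 0x43484000    # v_pk_fma_bf16 v2, v2, s0, 0x43484000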