; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -force-streaming -enable-subreg-liveness -verify-machineinstrs < %s | FileCheck %s

target triple="aarch64-linux-gnu"

; == Multi, multi (16-bit float) ==

define void @fdot_multi_za32_f16_vg1x2(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  ret void
}

define void @fdot_multi_za32_f16_vg1x2_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x9, x1, x0
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1h { z16.h, z24.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z17.h, z25.h }, pn8/z, [x9]
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z16.h, z17.h }, { z24.h, z25.h }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %1, 0
  %3 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %4, 0
  %6 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %4, 1
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 0, <vscale x 8 x half> %2, <vscale x 8 x half> %5, <vscale x 8 x half> %3, <vscale x 8 x half> %6)
  ret void
}

define void @fdot_multi_za32_f16_vg1x4(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
; CHECK-LABEL: fdot_multi_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                        <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7) #0 {
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
  ret void
}

define void @fdot_multi_za32_f16_vg1x4_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add x9, x0, x0, lsl #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x10, x1, x0
; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z17.h, z21.h, z25.h, z29.h }, pn8/z, [x10]
; CHECK-NEXT:    ld1h { z18.h, z22.h, z26.h, z30.h }, pn8/z, [x1, x0, lsl #1]
; CHECK-NEXT:    add x9, x1, x9
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1h { z19.h, z23.h, z27.h, z31.h }, pn8/z, [x9]
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z16.h - z19.h }, { z20.h - z23.h }
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, { z28.h - z31.h }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 0
  %3 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 1
  %4 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 2
  %5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 0
  %8 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 1
  %9 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 2
  %10 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 0
  %13 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 1
  %14 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 2
  %15 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 0
  %18 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 1
  %19 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 2
  %20 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 3
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %2, <vscale x 8 x half> %7, <vscale x 8 x half> %12, <vscale x 8 x half> %17, <vscale x 8 x half> %3, <vscale x 8 x half> %8, <vscale x 8 x half> %13, <vscale x 8 x half> %18)
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %4, <vscale x 8 x half> %9, <vscale x 8 x half> %14, <vscale x 8 x half> %19, <vscale x 8 x half> %5, <vscale x 8 x half> %10, <vscale x 8 x half> %15, <vscale x 8 x half> %20)
  ret void
}

; == Multi, multi (16-bit bfloat) ==

define void @bfdot_multi_za32_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #0 {
; CHECK-LABEL: bfdot_multi_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  ret void
}

define void @fdot_multi_za32_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
; CHECK-LABEL: fdot_multi_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                         <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7) #0 {
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
  ret void
}

; == Multi, single (16-bit float) ==

define void @fdot_single_za32_f16_vg1x2(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
  ret void
}
define void @fdot_single_za32_f16_vg1x4(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  ret void
}

; == Multi, single (16-bit bfloat) ==

define void @bfdot_single_za32_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
  ret void
}

define void @bfdot_single_za32_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  ret void
}

; == Multi, indexed (16-bit float) ==

define void @fdot_lane_za32_f16_vg1x2(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
  ret void
}

define void @fdot_lane_za32_f16_vg1x4(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, i32 3)
  ret void
}

; == Multi, indexed (16-bit bfloat) ==

define void @bfdot_lane_za32_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
  ret void
}

define void @bfdot_lane_za32_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, i32 3)
  ret void
}

attributes #0 = { nounwind "target-features"="+sme2" }

; == Multi, multi (16-bit float)
declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)

; == Multi, multi (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)

; == Multi, single (16-bit float)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)

; == Multi, single (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)

; == Multi, indexed (16-bit float)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)

; == Multi, indexed (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
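
; For reference, a minimal sketch of how the vgx2 form tested above surfaces
; at the C level through ACLE. This is not exercised by this test: the
; intrinsic name (svdot_za32_f16_vg1x2), header, and attribute spellings are
; taken from the ACLE SME2 specification and should be treated as an
; illustrative assumption, not something this file verifies.
;
;   #include <arm_sme.h>
;
;   // Accumulate a widening f16 dot product into two consecutive ZA.S
;   // vector groups starting at 'slice'; lowers to the
;   // llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16 intrinsic checked above.
;   void dot_example(uint32_t slice, svfloat16x2_t zn, svfloat16x2_t zm)
;       __arm_streaming __arm_inout("za") {
;     svdot_za32_f16_vg1x2(slice, zn, zm);   // -> fdot za.s[w8, 0, vgx2]
;   }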