; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -force-streaming -enable-subreg-liveness -verify-machineinstrs < %s | FileCheck %s

target triple = "aarch64-linux-gnu"

; == Multi, multi (unsigned) ==

define void @udot_multi_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3) #0 {
; CHECK-LABEL: udot_multi_za32_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  ret void
}

define void @udot_multi_za32_u16_vg1x2_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: udot_multi_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z24.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z25.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z16.b, z17.b }, { z24.b, z25.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  call void @llvm.aarch64.sme.udot.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6)
  ret void
}

define void @udot_multi_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: udot_multi_za32_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                        <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #0 {
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  ret void
}
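; A note on the form tested above (explanatory comment, not autogenerated):
; the vgx2/vgx4 "multi-by-multi" udot accumulates a widening dot product of a
; 2- or 4-vector tuple by another tuple into 2 or 4 single-vector groups of
; ZA. The slice select is a 32-bit base register (w8-w11) plus an immediate
; offset limited to 0-7, so each test issues the intrinsic at offset 0 and at
; %slice + 7 to cover both ends of the encodable immediate range. Roughly, in
; ACLE terms (intrinsic names per the SME2 ACLE; a sketch for orientation
; only, not part of this test):
;
;   void f(uint32_t slice, svuint16x2_t zn, svuint16x2_t zm)
;       __arm_streaming __arm_inout("za") {
;     svdot_za32_u16_vg1x2(slice, zn, zm);      // -> udot za.s[w8, 0, vgx2]
;     svdot_za32_u16_vg1x2(slice + 7, zn, zm);  // -> udot za.s[w8, 7, vgx2]
;   }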
define void @udot_multi_za32_u16_vg1x4_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: udot_multi_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsl x9, x0, #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    add x10, x9, x0
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x1, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x1, x10]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.b - z19.b }, { z20.b - z23.b }
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z24.b - z27.b }, { z28.b - z31.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18)
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20)
  ret void
}

define void @udot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3) #0 {
; CHECK-LABEL: udot_multi_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  ret void
}

define void @udot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: udot_multi_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    ret
                                       <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7) #0 {
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  ret void
}
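; The *_tuple tests in this file load their operands with predicate-as-counter
; ld1b/ld1h, whose x2/x4 forms return strided register tuples (e.g. z16/z24,
; or z16/z20/z24/z28). With -enable-subreg-liveness the allocator is expected
; to feed those tuples straight into the multi-vector dot instructions without
; inserting copies, which is what the register patterns in the CHECK lines pin
; down. (Explanatory comment, not autogenerated.)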
define void @udot_multi_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3) #1 {
; CHECK-LABEL: udot_multi_za64_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    udot za.d[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    udot za.d[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  ret void
}

define void @udot_multi_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: udot_multi_za64_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    udot za.d[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    udot za.d[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                        <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #1 {
  call void @llvm.aarch64.sme.udot.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  ret void
}

define void @usdot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3) #0 {
; CHECK-LABEL: usdot_multi_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    usdot za.s[w8, 7, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.usdot.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  ret void
}

define void @usdot_multi_za32_u16_vg1x2_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: usdot_multi_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z24.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z25.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z16.b, z17.b }, { z24.b, z25.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  call void @llvm.aarch64.sme.usdot.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6)
  ret void
}

define void @usdot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: usdot_multi_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    usdot za.s[w8, 7, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    ret
                                        <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7) #0 {
  call void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  ret void
}
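; The za.d-producing tests above widen i16 elements into 64-bit ZA
; accumulators and use attribute set #1, presumably adding the SME-I16I64
; feature that 64-bit accumulation requires (the attribute definitions fall
; outside this excerpt). (Explanatory comment, not autogenerated.)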
define void @usdot_multi_za32_u16_vg1x4_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: usdot_multi_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsl x9, x0, #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    add x10, x9, x0
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x1, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x1, x10]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, { z20.b - z23.b }
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, { z28.b - z31.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  call void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18)
  call void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20)
  ret void
}

; == Multi, multi (signed) ==

define void @sdot_multi_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3) #0 {
; CHECK-LABEL: sdot_multi_za32_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  ret void
}
define void @sdot_multi_za32_u16_vg1x2_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: sdot_multi_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z24.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z25.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z16.b, z17.b }, { z24.b, z25.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  call void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6)
  ret void
}

define void @sdot_multi_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: sdot_multi_za32_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                       <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #0 {
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  ret void
}

define void @sdot_multi_za32_u16_vg1x4_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: sdot_multi_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsl x9, x0, #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    add x10, x9, x0
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x1, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x1, x10]
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, { z20.b - z23.b }
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, { z28.b - z31.b }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18)
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20)
  ret void
}
define void @sdot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3) #0 {
; CHECK-LABEL: sdot_multi_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3)
  ret void
}

define void @sdot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: sdot_multi_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT:    ret
                                       <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7) #0 {
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7)
  ret void
}

define void @sdot_multi_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3) #1 {
; CHECK-LABEL: sdot_multi_za64_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sdot za.d[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    sdot za.d[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3)
  ret void
}

define void @sdot_multi_za64_u16_vg1x2_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: sdot_multi_za64_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x9, x1, x0
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1h { z16.h, z24.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z17.h, z25.h }, pn8/z, [x9]
; CHECK-NEXT:    sdot za.d[w8, 0, vgx2], { z16.h, z17.h }, { z24.h, z25.h }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
  call void @llvm.aarch64.sme.sdot.za64.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6)
  ret void
}
define void @sdot_multi_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: sdot_multi_za64_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    ldr z27, [x1]
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sdot za.d[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    sdot za.d[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT:    ret
                                       <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #1 {
  call void @llvm.aarch64.sme.sdot.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7)
  ret void
}

define void @sdot_multi_za64_u16_vg1x4_tuple(i64 %stride, ptr %ptr) #1 {
; CHECK-LABEL: sdot_multi_za64_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add x9, x0, x0, lsl #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x10, x1, x0
; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z17.h, z21.h, z25.h, z29.h }, pn8/z, [x10]
; CHECK-NEXT:    ld1h { z18.h, z22.h, z26.h, z30.h }, pn8/z, [x1, x0, lsl #1]
; CHECK-NEXT:    add x9, x1, x9
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1h { z19.h, z23.h, z27.h, z31.h }, pn8/z, [x9]
; CHECK-NEXT:    sdot za.d[w8, 0, vgx4], { z16.h - z19.h }, { z20.h - z23.h }
; CHECK-NEXT:    sdot za.d[w8, 0, vgx4], { z24.h - z27.h }, { z28.h - z31.h }
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 0
  %13 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 1
  %14 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 2
  %15 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 0
  %18 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 1
  %19 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 2
  %20 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 3
  call void @llvm.aarch64.sme.sdot.za64.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %12, <vscale x 8 x i16> %17, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %13, <vscale x 8 x i16> %18)
  call void @llvm.aarch64.sme.sdot.za64.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %14, <vscale x 8 x i16> %19, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %15, <vscale x 8 x i16> %20)
  ret void
}

; == Multi, single (unsigned) ==

define void @udot_single_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {
; CHECK-LABEL: udot_single_za32_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  ret void
}
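; A note on the single-operand form (explanatory comment, not autogenerated):
; the "multi, single" intrinsics multiply every vector of the zn tuple by one
; shared vector, mapping to the dot-product form that takes a single zm
; register (z3.h in the test above) instead of a second tuple. Roughly, in
; ACLE terms (names per the SME2 ACLE; orientation only):
; svdot_single_za32_u16_vg1x2(slice, zn, zm).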
define void @udot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
; CHECK-LABEL: udot_single_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x9, x0, x1
; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.h, z2.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z9.h, z10.h }, z0.h
; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> %zn)
  ret void
}

define void @udot_single_za32_u16_vg1x2_x4load_x2tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
; CHECK-LABEL: udot_single_za32_u16_vg1x2_x4load_x2tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-5
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x9, x0, x1
; CHECK-NEXT:    str z14, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z13, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1h { z1.h, z5.h, z9.h, z13.h }, pn8/z, [x0]
; CHECK-NEXT:    ld1h { z2.h, z6.h, z10.h, z14.h }, pn8/z, [x9]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.h, z2.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z5.h, z6.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z9.h, z10.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z13.h, z14.h }, z0.h
; CHECK-NEXT:    ldr z14, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z13, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #5
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %zn)
  ret void
}
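; The spill/reload traffic in the two tests above comes from the scalable
; (SVE) calling convention: with a scalable vector argument in play, z8-z23
; and part of the predicate file are callee-saved, so forming load tuples in
; that range forces the addvl frame setup and the str/ldr of z9/z10/z13/z14
; and p8 seen in the CHECK lines. (Explanatory comment, not autogenerated.)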
define void @udot_single_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
; CHECK-LABEL: udot_single_za32_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret void
}

define void @udot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
; CHECK-LABEL: udot_single_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-9
; CHECK-NEXT:    add x9, x1, x1, lsl #1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x10, x0, x1
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x9, x0, x9
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x0]
; CHECK-NEXT:    ld1h { z17.h, z21.h, z25.h, z29.h }, pn8/z, [x10]
; CHECK-NEXT:    ld1h { z18.h, z22.h, z26.h, z30.h }, pn8/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ld1h { z19.h, z23.h, z27.h, z31.h }, pn8/z, [x9]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z20.h - z23.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z24.h - z27.h }, z0.h
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z28.h - z31.h }, z0.h
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #9
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 0
  %13 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 1
  %14 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 2
  %15 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 0
  %18 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 1
  %19 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 2
  %20 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 3
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %12, <vscale x 8 x i16> %17, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %13, <vscale x 8 x i16> %18, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %14, <vscale x 8 x i16> %19, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %15, <vscale x 8 x i16> %20, <vscale x 8 x i16> %zn)
  ret void
}
define void @udot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: udot_single_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  ret void
}

define void @udot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: udot_single_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret void
}

define void @udot_single_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #1 {
; CHECK-LABEL: udot_single_za64_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.d[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    udot za.d[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  ret void
}

define void @udot_single_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #1 {
; CHECK-LABEL: udot_single_za64_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.d[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    udot za.d[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret void
}
define void @usdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: usdot_single_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    usdot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  ret void
}

define void @usdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
; CHECK-LABEL: usdot_single_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z0.b
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z9.b, z10.b }, z0.b
; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %zn)
  ret void
}

define void @usdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: usdot_single_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    usdot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret void
}
define void @usdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
; CHECK-LABEL: usdot_single_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-9
; CHECK-NEXT:    lsl x9, x1, #1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x10, x9, x1
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #9
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> %zn)
  ret void
}
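; usdot multiplies the unsigned zn tuple by signed zm elements, and sudot
; (further below) the signed zn tuple by unsigned zm. sudot is exercised only
; in its single and indexed forms here; a mixed-sign multi-by-multi product
; can already be expressed by swapping the operands of usdot. (Explanatory
; comment, not autogenerated.)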
; == Multi, single (signed) ==

define void @sdot_single_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {
; CHECK-LABEL: sdot_single_za32_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  ret void
}

define void @sdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
; CHECK-LABEL: sdot_single_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x9, x0, x1
; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z0.h
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z9.h, z10.h }, z0.h
; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> %zn)
  ret void
}

define void @sdot_single_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
; CHECK-LABEL: sdot_single_za32_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret void
}
define void @sdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
; CHECK-LABEL: sdot_single_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-9
; CHECK-NEXT:    add x9, x1, x1, lsl #1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x10, x0, x1
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x9, x0, x9
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x0]
; CHECK-NEXT:    ld1h { z17.h, z21.h, z25.h, z29.h }, pn8/z, [x10]
; CHECK-NEXT:    ld1h { z18.h, z22.h, z26.h, z30.h }, pn8/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ld1h { z19.h, z23.h, z27.h, z31.h }, pn8/z, [x9]
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z20.h - z23.h }, z0.h
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z0.h
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z28.h - z31.h }, z0.h
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #9
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 0
  %13 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 1
  %14 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 2
  %15 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 0
  %18 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 1
  %19 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 2
  %20 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 3
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %12, <vscale x 8 x i16> %17, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %13, <vscale x 8 x i16> %18, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %14, <vscale x 8 x i16> %19, <vscale x 8 x i16> %zn)
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %15, <vscale x 8 x i16> %20, <vscale x 8 x i16> %zn)
  ret void
}
define void @sdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: sdot_single_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  ret void
}

define void @sdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: sdot_single_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret void
}

define void @sdot_single_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #1 {
; CHECK-LABEL: sdot_single_za64_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.d[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    sdot za.d[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
  ret void
}

define void @sdot_single_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #1 {
; CHECK-LABEL: sdot_single_za64_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sdot za.d[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    sdot za.d[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret void
}

define void @sudot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: sudot_single_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    sudot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
  ret void
}
define void @sudot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
; CHECK-LABEL: sudot_single_za32_u16_vg1x2_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z1.b, z2.b }, z0.b
; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z9.b, z10.b }, z0.b
; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %zn)
  ret void
}

define void @sudot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: sudot_single_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    sudot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret void
}
define void @sudot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
; CHECK-LABEL: sudot_single_za32_u16_vg1x4_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-9
; CHECK-NEXT:    lsl x9, x1, #1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    add x10, x9, x1
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b
; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b
; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #9
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> %zn)
  ret void
}
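; A note on the indexed form tested next (explanatory comment, not
; autogenerated): in the lane tests below, the trailing i32 immediate selects
; one 32-bit sub-element group within each 128-bit segment of zm, giving an
; index range of 0-3 for both the .b and .h variants here; the tests use the
; top index, 3, to pin down the immediate encoding.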
; == Multi, indexed (unsigned) ==

define void @udot_lane_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {
; CHECK-LABEL: udot_lane_za32_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, i32 3)
  ret void
}

define void @udot_lane_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
; CHECK-LABEL: udot_lane_za32_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z0.h - z3.h }, z4.h[3]
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z0.h - z3.h }, z4.h[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, i32 3)
  ret void
}

define void @udot_lane_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: udot_lane_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z4.b, z5.b }, z3.b[3]
; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z4.b, z5.b }, z3.b[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, i32 3)
  ret void
}

define void @udot_lane_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: udot_lane_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z5.b[3]
; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z24.b - z27.b }, z5.b[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, i32 3)
  ret void
}

define void @udot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-LABEL: udot_form_2x_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
  tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
  ret void
}
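; The *_form_*_tuple tests differ from the lane tests above: their zm operand
; is poison because the point is not the multiply operand but whether the
; strided loads are formed directly into the tuple register patterns the
; strided dot encodings need (z16/z24 for x2, z16/z20/z24/z28 for x4), without
; intervening moves. (Explanatory comment, not autogenerated.)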
  tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
  ret void
}

define void @udot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
; CHECK-LABEL: udot_form_2x_tuple_svecc:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0]
; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
  tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
  ret void
}

define void @udot_form_4x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-LABEL: udot_form_4x_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsl x9, x1, #1
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    add x10, x9, x1
; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
%11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) ret void } define void @udot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: udot_form_4x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-9 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 
%mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 %11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) store %scalable_arg, ptr %ptr ret void } define void @udot_single_za32_u16_vg1x4_x2load_x4tuple(ptr %ptr, i64 %stride, %zn) #0 { ; CHECK-LABEL: udot_single_za32_u16_vg1x4_x2load_x4tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-5 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z9, [sp, #4, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z1.b, z9.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z2.b, z10.b }, pn8/z, [x0, x1] ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z4.b, z12.b }, pn8/z, [x0, x10] ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z1.b - z4.b }, z0.b ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z9.b - z12.b }, z0.b ; CHECK-NEXT: ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #5 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 %7 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %8 = extractvalue { , } %7, 0 %9 = extractvalue { , } %7, 1 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %10 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %11 = extractvalue { , } %10, 0 %12 = extractvalue { , } %10, 1 call void 
@llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %8, <vscale x 16 x i8> %11, <vscale x 16 x i8> %zn)
  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %9, <vscale x 16 x i8> %12, <vscale x 16 x i8> %zn)
  ret void
}

define void @udot_lane_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #1 {
; CHECK-LABEL: udot_lane_za64_u16_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    udot za.d[w8, 0, vgx2], { z4.h, z5.h }, z3.h[1]
; CHECK-NEXT:    udot za.d[w8, 7, vgx2], { z4.h, z5.h }, z3.h[1]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, i32 1)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, i32 1)
  ret void
}

define void @udot_lane_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #1 {
; CHECK-LABEL: udot_lane_za64_u16_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    udot za.d[w8, 0, vgx4], { z24.h - z27.h }, z5.h[1]
; CHECK-NEXT:    udot za.d[w8, 7, vgx4], { z24.h - z27.h }, z5.h[1]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.udot.lane.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, i32 1)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.udot.lane.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, i32 1)
  ret void
}

define void @usdot_lane_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
; CHECK-LABEL: usdot_lane_za32_u8_vg1x2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z4.b, z5.b }, z3.b[3]
; CHECK-NEXT:    usdot za.s[w8, 7, vgx2], { z4.b, z5.b }, z3.b[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, i32 3)
  ret void
}

define void @usdot_lane_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
; CHECK-LABEL: usdot_lane_za32_u8_vg1x4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z5.b[3]
; CHECK-NEXT:    usdot za.s[w8, 7, vgx4], { z24.b - z27.b }, z5.b[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, i32 3)
  %slice2 = add i32 %slice, 7
  call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, i32 3)
  ret void
}

define void @usdot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-LABEL: usdot_form_2x_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT:    ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> }
@llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) ret void } define void @usdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: usdot_form_2x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z2.b, z10.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x0, x1] ; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0] ; CHECK-NEXT: ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) store %scalable_arg, ptr %ptr ret void } define void @usdot_form_4x_tuple(ptr %ptr, i64 %stride) #0 { ; CHECK-LABEL: usdot_form_4x_tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 
%11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) ret void } define void @usdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: usdot_form_4x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-9 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, 
i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 %11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) store %scalable_arg, ptr %ptr ret void } ; == Multi, indexed (signed) == define void @sdot_lane_za32_u16_vg1x2(i32 %slice, %unused, %zn0, %zn1, %zn2) #0 { ; CHECK-LABEL: sdot_lane_za32_u16_vg1x2: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z5.d, z2.d ; CHECK-NEXT: mov z4.d, z1.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3] ; CHECK-NEXT: sdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv8i16(i32 %slice2, %zn0, %zn1, %zn2, i32 3) ret void } define void @sdot_lane_za32_u16_vg1x4(i32 %slice, %unused, %zn0, %zn1, %zn2, %zn3, %zn4) #0 { ; CHECK-LABEL: sdot_lane_za32_u16_vg1x4: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z27.d, z4.d ; CHECK-NEXT: mov z26.d, z3.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov z25.d, z2.d ; CHECK-NEXT: mov z24.d, z1.d ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3] ; CHECK-NEXT: sdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv8i16(i32 %slice2, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) ret void } define void @sdot_lane_za32_u8_vg1x2(i32 %slice, %unused, %zn0, %zn1, %zn2) #0 { ; CHECK-LABEL: sdot_lane_za32_u8_vg1x2: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z5.d, z2.d ; CHECK-NEXT: mov z4.d, z1.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z4.b, z5.b }, z3.b[3] ; CHECK-NEXT: sdot za.s[w8, 7, vgx2], { z4.b, z5.b }, z3.b[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 %slice2, %zn0, %zn1, %zn2, i32 3) ret void } define void @sdot_lane_za32_u8_vg1x4(i32 %slice, %unused, %zn0, %zn1, %zn2, %zn3, %zn4) #0 { ; CHECK-LABEL: sdot_lane_za32_u8_vg1x4: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z27.d, z4.d ; CHECK-NEXT: mov z26.d, z3.d ; CHECK-NEXT: mov w8, w0 ; 
CHECK-NEXT: mov z25.d, z2.d ; CHECK-NEXT: mov z24.d, z1.d ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z5.b[3] ; CHECK-NEXT: sdot za.s[w8, 7, vgx4], { z24.b - z27.b }, z5.b[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 %slice2, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) ret void } define void @sdot_form_2x_tuple(ptr %ptr, i64 %stride) #0 { ; CHECK-LABEL: sdot_form_2x_tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1] ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0] ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) ret void } define void @sdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: sdot_form_2x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z2.b, z10.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x0, x1] ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0] ; CHECK-NEXT: ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) store %scalable_arg, ptr %ptr ret void } define void @sdot_form_4x_tuple(ptr %ptr, i64 %stride) #0 { ; CHECK-LABEL: sdot_form_4x_tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, 
z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 %11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) ret void } define void @sdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: sdot_form_4x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-9 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 %11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, 
%4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) store %scalable_arg, ptr %ptr ret void } define void @sdot_lane_za64_u16_vg1x2(i32 %slice, %unused, %zn0, %zn1, %zn2) #1 { ; CHECK-LABEL: sdot_lane_za64_u16_vg1x2: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z5.d, z2.d ; CHECK-NEXT: mov z4.d, z1.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: sdot za.d[w8, 0, vgx2], { z4.h, z5.h }, z3.h[1] ; CHECK-NEXT: sdot za.d[w8, 7, vgx2], { z4.h, z5.h }, z3.h[1] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za64.vg1x2.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, i32 1) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za64.vg1x2.nxv8i16(i32 %slice2, %zn0, %zn1, %zn2, i32 1) ret void } define void @sdot_lane_za64_u16_vg1x4(i32 %slice, %unused, %zn0, %zn1, %zn2, %zn3, %zn4) #1 { ; CHECK-LABEL: sdot_lane_za64_u16_vg1x4: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z27.d, z4.d ; CHECK-NEXT: mov z26.d, z3.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov z25.d, z2.d ; CHECK-NEXT: mov z24.d, z1.d ; CHECK-NEXT: sdot za.d[w8, 0, vgx4], { z24.h - z27.h }, z5.h[1] ; CHECK-NEXT: sdot za.d[w8, 7, vgx4], { z24.h - z27.h }, z5.h[1] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sdot.lane.za64.vg1x4.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zn4, i32 1) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sdot.lane.za64.vg1x4.nxv8i16(i32 %slice2, %zn0, %zn1, %zn2, %zn3, %zn4, i32 1) ret void } define void @sudot_lane_za32_u8_vg1x2(i32 %slice, %unused, %zn0, %zn1, %zn2) #0 { ; CHECK-LABEL: sudot_lane_za32_u8_vg1x2: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z5.d, z2.d ; CHECK-NEXT: mov z4.d, z1.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z4.b, z5.b }, z3.b[3] ; CHECK-NEXT: sudot za.s[w8, 7, vgx2], { z4.b, z5.b }, z3.b[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 %slice2, %zn0, %zn1, %zn2, i32 3) ret void } define void @sudot_lane_za32_u8_vg1x4(i32 %slice, %unused, %zn0, %zn1, %zn2, %zn3, %zn4) #0 { ; CHECK-LABEL: sudot_lane_za32_u8_vg1x4: ; CHECK: // %bb.0: ; CHECK-NEXT: mov z27.d, z4.d ; CHECK-NEXT: mov z26.d, z3.d ; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov z25.d, z2.d ; CHECK-NEXT: mov z24.d, z1.d ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z5.b[3] ; CHECK-NEXT: sudot za.s[w8, 7, vgx4], { z24.b - z27.b }, z5.b[3] ; CHECK-NEXT: ret call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) %slice2 = add i32 %slice, 7 call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 %slice2, %zn0, %zn1, %zn2, %zn3, %zn4, i32 3) ret void } define void @sudot_form_2x_tuple(ptr %ptr, i64 %stride) #0 { ; CHECK-LABEL: sudot_form_2x_tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1] ; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0] ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } 
@llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) ret void } define void @sudot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: sudot_form_2x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z2.b, z10.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x0, x1] ; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0] ; CHECK-NEXT: ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , } %1, 0 %3 = extractvalue { , } %1, 1 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %4 = tail call { , } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %5 = extractvalue { , } %4, 0 %6 = extractvalue { , } %4, 1 tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, %2, %5, poison, i32 0) tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, %3, %6, poison, i32 0) store %scalable_arg, ptr %ptr ret void } define void @sudot_form_4x_tuple(ptr %ptr, i64 %stride) #0 { ; CHECK-LABEL: sudot_form_4x_tuple: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride %6 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2) %7 = extractvalue { , , , } %6, 0 %8 = extractvalue { , , , } %6, 1 %9 = extractvalue { , , , } %6, 2 %10 = extractvalue { , , , } %6, 3 %mul3 = shl i64 %stride, 1 %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 
%11 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4) %12 = extractvalue { , , , } %11, 0 %13 = extractvalue { , , , } %11, 1 %14 = extractvalue { , , , } %11, 2 %15 = extractvalue { , , , } %11, 3 %mul5 = mul i64 %stride, 3 %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 %16 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6) %17 = extractvalue { , , , } %16, 0 %18 = extractvalue { , , , } %16, 1 %19 = extractvalue { , , , } %16, 2 %20 = extractvalue { , , , } %16, 3 tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, %2, %7, %12, %17, poison, i32 0) tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, %3, %8, %13, %18, poison, i32 0) tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, %4, %9, %14, %19, poison, i32 0) tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, %5, %10, %15, %20, poison, i32 0) ret void } define void @sudot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, %scalable_arg) #0 { ; CHECK-LABEL: sudot_form_4x_tuple_svecc: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-9 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: mov w8, wzr ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: add x10, x9, x1 ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0] ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1] ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9] ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0] ; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0] ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: str z0, [x0] ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() %1 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr) %2 = extractvalue { , , , } %1, 0 %3 = extractvalue { , , , } %1, 1 %4 = extractvalue { , , , } %1, 2 %5 = extractvalue { , , , } %1, 3 %arrayidx2 = getelementptr inbounds i8, ptr %ptr, 
i64 %stride
  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
  %mul3 = shl i64 %stride, 1
  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
  %mul5 = mul i64 %stride, 3
  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
  tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> poison, i32 0)
  tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
  tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
  tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
  ret void
}

attributes #0 = { nounwind "target-features"="+sme2" }
attributes #1 = { nounwind "target-features"="+sme2,+sme-i16i64" }

; == Multi, multi (unsigned)
declare void @llvm.aarch64.sme.udot.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.udot.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.usdot.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)

; == Multi, multi (signed)
declare void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.sdot.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)

; == Multi, single (unsigned)
declare void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)

; == Multi, single (signed)
declare void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)

; == Multi, indexed (unsigned)
declare void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.udot.lane.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.udot.lane.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)

; == Multi, indexed (signed)
declare void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.sdot.lane.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.sdot.lane.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)