; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -enable-subreg-liveness -force-streaming -mattr=+sve2,+sme2 | FileCheck %s

; Check that the two TBL2 table operands are allocated to consecutive
; registers (e.g. { z3, z4 }), even when the inputs are the strided register
; tuples (e.g. { z3, z11 }) produced by the multi-vector loads.

;
; TBL2
;

define { <vscale x 16 x i8>, <vscale x 16 x i8> } @tbl2_b_tuple(i64 %stride, ptr %ptr, <vscale x 16 x i8> %a) {
; CHECK-LABEL: tbl2_b_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x1]
; CHECK-NEXT:    ld1b { z4.b, z12.b }, pn8/z, [x1, x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.b, { z3.b, z4.b }, z0.b
; CHECK-NEXT:    tbl z1.b, { z11.b, z12.b }, z0.b
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
  %res1 = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %a)
  %res2 = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %a)
  %ins1 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> %res1, 0
  %ins2 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ins1, <vscale x 16 x i8> %res2, 1
  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %ins2
}

define { <vscale x 8 x i16>, <vscale x 8 x i16> } @tbl2_h_tuple(i64 %stride, ptr %ptr, <vscale x 8 x i16> %a) {
; CHECK-LABEL: tbl2_h_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1h { z3.h, z11.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z4.h, z12.h }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.h, { z3.h, z4.h }, z0.h
; CHECK-NEXT:    tbl z1.h, { z11.h, z12.h }, z0.h
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
  %res1 = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %a)
  %res2 = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> %a)
  %ins1 = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> %res1, 0
  %ins2 = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %ins1, <vscale x 8 x i16> %res2, 1
  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %ins2
}

define { <vscale x 4 x i32>, <vscale x 4 x i32> } @tbl2_s_tuple(i64 %stride, ptr %ptr, <vscale x 4 x i32> %a) {
; CHECK-LABEL: tbl2_s_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1w { z3.s, z11.s }, pn8/z, [x1]
; CHECK-NEXT:    ld1w { z4.s, z12.s }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.s, { z3.s, z4.s }, z0.s
; CHECK-NEXT:    tbl z1.s, { z11.s, z12.s }, z0.s
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 0
  %3 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %4, 0
  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %4, 1
  %res1 = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32> %2, <vscale x 4 x i32> %5, <vscale x 4 x i32> %a)
  %res2 = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32> %3, <vscale x 4 x i32> %6, <vscale x 4 x i32> %a)
  %ins1 = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> %res1, 0
  %ins2 = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %ins1, <vscale x 4 x i32> %res2, 1
  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %ins2
}

define { <vscale x 2 x i64>, <vscale x 2 x i64> } @tbl2_d_tuple(i64 %stride, ptr %ptr, <vscale x 2 x i64> %a) {
; CHECK-LABEL: tbl2_d_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1d { z3.d, z11.d }, pn8/z, [x1]
; CHECK-NEXT:    ld1d { z4.d, z12.d }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.d, { z3.d, z4.d }, z0.d
; CHECK-NEXT:    tbl z1.d, { z11.d, z12.d }, z0.d
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
  %3 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %4, 0
  %6 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %4, 1
  %res1 = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64> %2, <vscale x 2 x i64> %5, <vscale x 2 x i64> %a)
  %res2 = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64> %3, <vscale x 2 x i64> %6, <vscale x 2 x i64> %a)
  %ins1 = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> %res1, 0
  %ins2 = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %ins1, <vscale x 2 x i64> %res2, 1
  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %ins2
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @tbl2_bf16_tuple(i64 %stride, ptr %ptr, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: tbl2_bf16_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1h { z3.h, z11.h }, pn8/z, [x1]
; CHECK-NEXT:    ld1h { z4.h, z12.h }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.h, { z3.h, z4.h }, z0.h
; CHECK-NEXT:    tbl z1.h, { z11.h, z12.h }, z0.h
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %1, 0
  %3 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %4, 0
  %6 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %4, 1
  %res1 = call <vscale x 8 x bfloat> @llvm.aarch64.sve.tbl2.nxv8bf16(<vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %5, <vscale x 8 x i16> %a)
  %res2 = call <vscale x 8 x bfloat> @llvm.aarch64.sve.tbl2.nxv8bf16(<vscale x 8 x bfloat> %3, <vscale x 8 x bfloat> %6, <vscale x 8 x i16> %a)
  %ins1 = insertvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } poison, <vscale x 8 x bfloat> %res1, 0
  %ins2 = insertvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %ins1, <vscale x 8 x bfloat> %res2, 1
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %ins2
}

define { <vscale x 4 x float>, <vscale x 4 x float> } @tbl2_f32_tuple(i64 %stride, ptr %ptr, <vscale x 4 x i32> %a) {
; CHECK-LABEL: tbl2_f32_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1w { z3.s, z11.s }, pn8/z, [x1]
; CHECK-NEXT:    ld1w { z4.s, z12.s }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.s, { z3.s, z4.s }, z0.s
; CHECK-NEXT:    tbl z1.s, { z11.s, z12.s }, z0.s
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x2.nxv4f32(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %1, 0
  %3 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x2.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %4, 0
  %6 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %4, 1
  %res1 = call <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float> %2, <vscale x 4 x float> %5, <vscale x 4 x i32> %a)
  %res2 = call <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float> %3, <vscale x 4 x float> %6, <vscale x 4 x i32> %a)
  %ins1 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> %res1, 0
  %ins2 = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } %ins1, <vscale x 4 x float> %res2, 1
  ret { <vscale x 4 x float>, <vscale x 4 x float> } %ins2
}

define { <vscale x 2 x double>, <vscale x 2 x double> } @tbl2_f64_tuple(i64 %stride, ptr %ptr, <vscale x 2 x i64> %a) {
; CHECK-LABEL: tbl2_f64_tuple:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    ptrue pn8.b
; CHECK-NEXT:    add x8, x1, x0
; CHECK-NEXT:    ld1d { z3.d, z11.d }, pn8/z, [x1]
; CHECK-NEXT:    ld1d { z4.d, z12.d }, pn8/z, [x8]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    tbl z2.d, { z3.d, z4.d }, z0.d
; CHECK-NEXT:    tbl z1.d, { z11.d, z12.d }, z0.d
; CHECK-NEXT:    ldr z12, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %1 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %ptr)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 1
  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %4 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %arrayidx2)
  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 0
  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 1
  %res1 = call <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double> %2, <vscale x 2 x double> %5, <vscale x 2 x i64> %a)
  %res2 = call <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double> %3, <vscale x 2 x double> %6, <vscale x 2 x i64> %a)
  %ins1 = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> %res1, 0
  %ins2 = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } %ins1, <vscale x 2 x double> %res2, 1
  ret { <vscale x 2 x double>, <vscale x 2 x double> } %ins2
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.tbl2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i64>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.tbl2.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i16>)

; +bf16 is required for the bfloat version.
attributes #0 = { "target-features"="+sve2,+bf16" }