author     Andrzej Warzynski <andrzej.warzynski@arm.com>    2020-03-19 09:35:31 +0000
committer  Andrzej Warzynski <andrzej.warzynski@arm.com>    2020-03-19 12:53:36 +0000
commit     0ea4fb5bb77ba3da165acaee54bb77379b137bb0
tree       e9c8db8fbfedaed001865e5673f9650dec74b410 /llvm
parent     4a58996dd2d226063bc9703c4722e06a5d041b23
[AArch64][SVE] Rename intrinsics for gather prefetch [NFC]
Summary:
In order to keep the names consistent with other SVE gather loads, the
intrinsics for gather prefetch are renamed as follows:
* @llvm.aarch64.sve.gather.prfb -> @llvm.aarch64.sve.prfb.gather (the prfh, prfw and prfd intrinsics, and their scaled/scaled.sxtw/scaled.uxtw variants, are renamed in the same way)
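For illustration, only the intrinsic name changes at a call site; the arguments are untouched (a minimal sketch in LLVM IR, based on the updated tests below, where the trailing i32 1 selects the pldl1strm prefetch operation):

    ; before
    call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
    ; after
    call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)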
Reviewed by: fpetrogalli
Differential Revision: https://reviews.llvm.org/D76421
Diffstat (limited to 'llvm')
6 files changed, 271 insertions, 271 deletions
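The renamed intrinsics still flow through the existing DAG combine (combineSVEPrefetchVecBaseImmOff, below): when the immediate of the vector-plus-immediate form is not valid for the instruction (for prfb the valid range is 0, 1, ..., 31), the offset is moved into a scalar register and the corresponding .scaled.uxtw intrinsic is used instead. A sketch of the effect, distilled from the invalid-imm-offset tests below (the register number is allocator-chosen):

    call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
    ; lowers to:
    ;   mov w8, #32
    ;   prfb pldl1strm, p0, [x8, z0.s, uxtw]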
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a6de3f9..50f26a7 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1302,29 +1302,29 @@ def int_aarch64_sve_prf
 
 // Scalar + 32-bit scaled offset vector, zero extend, packed and
 // unpacked.
-def int_aarch64_sve_gather_prfb_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
 
 // Scalar + 32-bit scaled offset vector, sign extend, packed and
 // unpacked.
-def int_aarch64_sve_gather_prfb_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
 
 // Scalar + 64-bit scaled offset vector.
-def int_aarch64_sve_gather_prfb_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
 
 // Vector + scalar.
-def int_aarch64_sve_gather_prfb : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfh : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfw : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfd : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfb_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfh_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfw_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfd_gather : SVE_gather_prf_vector_base_scalar_offset;
 
 //
 // Scalar to vector operations
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7b189da..ab9176a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12995,9 +12995,9 @@ static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
 }
 
-/// Combines a node carrying the intrinsic `aarch64_sve_gather_prf<T>` into a
-/// node that uses `aarch64_sve_gather_prf<T>_scaled_uxtw` when the scalar
-/// offset passed to `aarch64_sve_gather_prf<T>` is not a valid immediate for
+/// Combines a node carrying the intrinsic `aarch64_sve_prf_gather<T>` into a
+/// node that uses `aarch64_sve_prf_gather<T>_scaled_uxtw` when the scalar
+/// offset passed to `aarch64_sve_prf_gather<T>` is not a valid immediate for
 /// the sve gather prefetch instruction with vector plus immediate addressing
 /// mode.
 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
@@ -13011,8 +13011,8 @@ static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
   // ...otherwise swap the offset base with the offset...
   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
   std::swap(Ops[ImmPos], Ops[OffsetPos]);
 
-  // ...and remap the intrinsic `aarch64_sve_gather_prf<T>` to
-  // `aarch64_sve_gather_prf<T>_scaled_uxtw`.
+  // ...and remap the intrinsic `aarch64_sve_prf_gather<T>` to
+  // `aarch64_sve_prf_gather<T>_scaled_uxtw`.
   SDLoc DL(N);
   Ops[1] = DAG.getConstant(NewIID, DL, MVT::i64);
@@ -13083,30 +13083,30 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::INTRINSIC_VOID:
   case ISD::INTRINSIC_W_CHAIN:
     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
-    case Intrinsic::aarch64_sve_gather_prfb:
+    case Intrinsic::aarch64_sve_prfb_gather:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_gather_prfb_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw,
           1 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_gather_prfh:
+    case Intrinsic::aarch64_sve_prfh_gather:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_gather_prfh_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw,
           2 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_gather_prfw:
+    case Intrinsic::aarch64_sve_prfw_gather:
       return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_gather_prfw_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw,
           4 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_gather_prfd:
+    case Intrinsic::aarch64_sve_prfd_gather:
      return combineSVEPrefetchVecBaseImmOff(
-          N, DAG, Intrinsic::aarch64_sve_gather_prfd_scaled_uxtw,
+          N, DAG, Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw,
           8 /*=ScalarSizeInBytes*/);
-    case Intrinsic::aarch64_sve_gather_prfb_scaled_uxtw:
-    case Intrinsic::aarch64_sve_gather_prfb_scaled_sxtw:
-    case Intrinsic::aarch64_sve_gather_prfh_scaled_uxtw:
-    case Intrinsic::aarch64_sve_gather_prfh_scaled_sxtw:
-    case Intrinsic::aarch64_sve_gather_prfw_scaled_uxtw:
-    case Intrinsic::aarch64_sve_gather_prfw_scaled_sxtw:
-    case Intrinsic::aarch64_sve_gather_prfd_scaled_uxtw:
-    case Intrinsic::aarch64_sve_gather_prfd_scaled_sxtw:
+    case Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw:
+    case Intrinsic::aarch64_sve_prfb_gather_scaled_sxtw:
+    case Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw:
+    case Intrinsic::aarch64_sve_prfh_gather_scaled_sxtw:
+    case Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw:
+    case Intrinsic::aarch64_sve_prfw_gather_scaled_sxtw:
+    case Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw:
+    case Intrinsic::aarch64_sve_prfd_gather_scaled_sxtw:
       return legalizeSVEGatherPrefetchOffsVec(N, DAG);
     case Intrinsic::aarch64_neon_ld2:
     case Intrinsic::aarch64_neon_ld3:
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d3a541d..f3b3405 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -880,37 +880,37 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
 
   // Gather prefetch using scaled 32-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.s, uxtw #1]
-  defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_gather_prfb_scaled_sxtw, int_aarch64_sve_gather_prfb_scaled_uxtw>;
-  defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_gather_prfh_scaled_sxtw, int_aarch64_sve_gather_prfh_scaled_uxtw>;
-  defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_gather_prfw_scaled_sxtw, int_aarch64_sve_gather_prfw_scaled_uxtw>;
-  defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_gather_prfd_scaled_sxtw, int_aarch64_sve_gather_prfd_scaled_uxtw>;
+  defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
+  defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
+  defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
+  defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
 
   // Gather prefetch using unpacked, scaled 32-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.d, uxtw #1]
-  defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_gather_prfb_scaled_sxtw, int_aarch64_sve_gather_prfb_scaled_uxtw>;
-  defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_gather_prfh_scaled_sxtw, int_aarch64_sve_gather_prfh_scaled_uxtw>;
-  defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_gather_prfw_scaled_sxtw, int_aarch64_sve_gather_prfw_scaled_uxtw>;
-  defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_gather_prfd_scaled_sxtw, int_aarch64_sve_gather_prfd_scaled_uxtw>;
+  defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
+  defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
+  defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
+  defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
 
   // Gather prefetch using scaled 64-bit offsets, e.g.
   //    prfh pldl1keep, p0, [x0, z0.d, lsl #1]
-  defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_gather_prfb_scaled>;
-  defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_gather_prfh_scaled>;
-  defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_gather_prfw_scaled>;
-  defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_gather_prfd_scaled>;
+  defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_prfb_gather_scaled>;
+  defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_scaled>;
+  defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_scaled>;
+  defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_scaled>;
 
   // Gather prefetch using 32/64-bit pointers with offset, e.g.
   //    prfh pldl1keep, p0, [z0.s, #16]
   //    prfh pldl1keep, p0, [z0.d, #16]
-  defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_gather_prfb>;
-  defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_gather_prfh>;
-  defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_gather_prfw>;
-  defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_gather_prfd>;
-
-  defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_gather_prfb>;
-  defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_gather_prfh>;
-  defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_gather_prfw>;
-  defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_gather_prfd>;
+  defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
+  defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
+  defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
+  defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
+
+  defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
+  defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
+  defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
+  defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
 
   defm ADR_SXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_sxtw<0b00, "adr">;
   defm ADR_UXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_uxtw<0b01, "adr">;
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll
index 7825170..2de5668 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll
@@ -1,200 +1,200 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfb_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_nx2vi64:
+define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfh_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_nx2vi64:
+define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfw_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_nx2vi64:
+define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx4vi32:
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked scaled offset
-define void @llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx2vi64:
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
-define void @llvm_aarch64_sve_gather_prfd_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_nx2vi64:
+define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
-  ret void
-  }
-
-declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfb.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+  call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
+  ret void
+  }
+
+declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+-  call void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
+-  ret void
+-  }
+-
+-declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfb.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfh.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfw.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
+-declare void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
++  call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
++  ret void
++  }
++
++declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
++declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
index 481302c..8be10be 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll
@@ -1,82 +1,82 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_gather_prfb_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32:
+define void @llvm_aarch64_sve_prfb_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32:
 ; CHECK-NEXT: prfb pldl1strm, p0, [z0.s, #7]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_gather_prfb_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64:
+define void @llvm_aarch64_sve_prfb_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64:
 ; CHECK-NEXT: prfb pldl1strm, p0, [z0.d, #7]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_gather_prfh_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32:
+define void @llvm_aarch64_sve_prfh_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32:
 ; CHECK-NEXT: prfh pldl1strm, p0, [z0.s, #6]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_gather_prfh_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64:
+define void @llvm_aarch64_sve_prfh_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64:
 ; CHECK-NEXT: prfh pldl1strm, p0, [z0.d, #6]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_gather_prfw_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32:
+define void @llvm_aarch64_sve_prfw_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32:
 ; CHECK-NEXT: prfw pldl1strm, p0, [z0.s, #12]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_gather_prfw_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64:
+define void @llvm_aarch64_sve_prfw_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64:
 ; CHECK-NEXT: prfw pldl1strm, p0, [z0.d, #12]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
-define void @llvm_aarch64_sve_gather_prfd_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32:
+define void @llvm_aarch64_sve_prfd_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32:
 ; CHECK-NEXT: prfd pldl1strm, p0, [z0.s, #16]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
   ret void
 }
 
 ; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
-define void @llvm_aarch64_sve_gather_prfd_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64:
+define void @llvm_aarch64_sve_prfd_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64:
 ; CHECK-NEXT: prfd pldl1strm, p0, [z0.d, #16]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
   ret void
 }
 
-declare void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
index 4b0b42e..ca027ed 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
@@ -1,286 +1,286 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 1, ..., 31
-define void @llvm_aarch64_sve_gather_prfb_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #32
 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.s, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
 ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 1, ..., 31
-define void @llvm_aarch64_sve_gather_prfb_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset:
 ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #32
 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.d, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 2, ..., 62
-define void @llvm_aarch64_sve_gather_prfh_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #63
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.s, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
+define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #33
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
   ret void
 }
 
 ; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 2, ..., 62
-define void @llvm_aarch64_sve_gather_prfh_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset:
 ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #63
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.d, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
+define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #33
 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 4, ..., 124
-define void @llvm_aarch64_sve_gather_prfw_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #125
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.s, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
+define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #33
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
   ret void
 }
 
 ; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 4, ..., 124
-define void @llvm_aarch64_sve_gather_prfw_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset:
 ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #125
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.d, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
+define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK-NEXT: mov w[[N:[0-9]+]], #33
 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 8, ..., 248
-define void @llvm_aarch64_sve_gather_prfd_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset:
 ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
 ; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
   ret void
 }
 
-define void @llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x
i32> %bases, <vscale x 4 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #125 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.s, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1) ret void } -define void @llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1) ret void } -define void @llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8: +define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8: ; CHECK-NEXT: mov w[[N:[0-9]+]], #33 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1) ret void } ; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 4, ..., 248 -define void @llvm_aarch64_sve_gather_prfd_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64_runtime_offset: +define void @llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset: ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1) ret void } -define void @llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov 
w[[N:[0-9]+]], #125 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.d, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1) ret void } -define void @llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1) ret void } -define void @llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8: +define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8: ; CHECK-NEXT: mov w[[N:[0-9]+]], #33 ; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1) + call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1) ret void } -declare void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, 
i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) |
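
For context, the valid-immediate case that these tests bracket would look like the sketch below. This is a hypothetical example, not part of the patch: the function name @prfh_gather_valid_imm is invented, while the intrinsic name and declaration are taken verbatim from the diff above. Per the constraint comment (imm = 0, 2, ..., 62), i64 62 is in range, so it should be selectable to the PRFH <prfop>, <Pg>, [<Zn>.S, #<imm>] form directly, rather than the mov-plus-scalar-register fallback checked by the invalid-offset tests.

; Hypothetical sketch (not from this commit): PRFH gather prefetch with a
; valid immediate offset. 62 is the largest even value <= 62 permitted by
; the addressing mode, so no scratch register should be needed.
define void @prfh_gather_valid_imm(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 62, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64, i32)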