aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Mayer <fmayer@google.com>2024-08-07 14:00:51 -0700
committerFlorian Mayer <fmayer@google.com>2024-08-07 14:00:51 -0700
commit10fbc246b146dbdf6b7cd9083bf392505c534fbc (patch)
tree898a2504a8bc941166603c77021937fd995e79ce
parent191d02015fba3458bdf6381ee93c32e485daf25e (diff)
parenta05fa131db58f2d66a1f9e68fea74068f9218c2b (diff)
downloadllvm-users/fmayer/spr/compiler-rt-ubsan-leave-bufferedstacktrace-uninit.zip
llvm-users/fmayer/spr/compiler-rt-ubsan-leave-bufferedstacktrace-uninit.tar.gz
llvm-users/fmayer/spr/compiler-rt-ubsan-leave-bufferedstacktrace-uninit.tar.bz2
Created using spr 1.3.4
-rw-r--r--bolt/CMakeLists.txt2
-rw-r--r--bolt/include/bolt/Core/BinaryContext.h3
-rw-r--r--bolt/lib/Core/BinaryContext.cpp23
-rw-r--r--bolt/lib/Core/BinaryFunction.cpp5
-rw-r--r--bolt/lib/Rewrite/MachORewriteInstance.cpp1
-rw-r--r--bolt/lib/Rewrite/RewriteInstance.cpp73
-rw-r--r--bolt/test/AArch64/Inputs/build_id.ldscript9
-rw-r--r--bolt/test/AArch64/Inputs/got_end_of_section_symbol.lld_script6
-rw-r--r--bolt/test/AArch64/build_id.c25
-rw-r--r--bolt/test/AArch64/got_end_of_section_symbol.s28
-rw-r--r--bolt/test/AArch64/update-weak-reference-symbol.s34
-rw-r--r--bolt/test/X86/Inputs/build_id.yaml326
-rw-r--r--bolt/test/X86/build_id.test8
-rw-r--r--bolt/test/X86/dynamic-relocs-on-entry.s32
-rw-r--r--bolt/test/X86/section-end-sym.s4
-rw-r--r--bolt/unittests/Core/BinaryContext.cpp1
-rw-r--r--bolt/unittests/Core/MCPlusBuilder.cpp1
-rw-r--r--clang-tools-extra/clang-tidy/tool/CMakeLists.txt6
-rw-r--r--clang/cmake/modules/AddClang.cmake2
-rw-r--r--clang/docs/CommandGuide/clang.rst4
-rw-r--r--clang/docs/OpenMPSupport.rst2
-rw-r--r--clang/docs/ReleaseNotes.rst15
-rw-r--r--clang/docs/StandardCPlusPlusModules.rst17
-rw-r--r--clang/docs/UsersManual.rst6
-rw-r--r--clang/docs/tools/clang-formatted-files.txt1
-rw-r--r--clang/include/clang/AST/ExprCXX.h7
-rw-r--r--clang/include/clang/AST/OpenMPClause.h79
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h2
-rw-r--r--clang/include/clang/Basic/AttributeCommonInfo.h6
-rw-r--r--clang/include/clang/Basic/BuiltinsPPC.def10
-rw-r--r--clang/include/clang/Basic/BuiltinsX86.def38
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td2
-rw-r--r--clang/include/clang/Basic/Features.def3
-rw-r--r--clang/include/clang/Basic/LangOptions.def5
-rw-r--r--clang/include/clang/Basic/PointerAuthOptions.h7
-rw-r--r--clang/include/clang/Basic/arm_sve.td34
-rw-r--r--clang/include/clang/Basic/riscv_vector.td78
-rw-r--r--clang/include/clang/Basic/riscv_vector_common.td4
-rw-r--r--clang/include/clang/Driver/Options.td21
-rw-r--r--clang/include/clang/Lex/Preprocessor.h4
-rw-r--r--clang/include/clang/Sema/Sema.h9
-rw-r--r--clang/include/clang/Sema/SemaOpenMP.h3
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h10
-rw-r--r--clang/include/clang/Support/RISCVVIntrinsicUtils.h3
-rw-r--r--clang/lib/AST/ASTImporter.cpp17
-rw-r--r--clang/lib/AST/ExprCXX.cpp19
-rw-r--r--clang/lib/AST/ExprConstant.cpp110
-rw-r--r--clang/lib/AST/Interp/Compiler.cpp25
-rw-r--r--clang/lib/AST/Interp/Interp.cpp7
-rw-r--r--clang/lib/AST/Interp/Interp.h11
-rw-r--r--clang/lib/AST/Interp/Pointer.cpp27
-rw-r--r--clang/lib/AST/Interp/Pointer.h8
-rw-r--r--clang/lib/AST/Interp/State.h3
-rw-r--r--clang/lib/AST/OpenMPClause.cpp26
-rw-r--r--clang/lib/AST/StmtProfile.cpp3
-rw-r--r--clang/lib/Basic/Attributes.cpp34
-rw-r--r--clang/lib/Basic/CMakeLists.txt1
-rw-r--r--clang/lib/Basic/IdentifierTable.cpp86
-rw-r--r--clang/lib/Basic/Targets.cpp12
-rw-r--r--clang/lib/Basic/Targets/Le64.cpp30
-rw-r--r--clang/lib/Basic/Targets/Le64.h64
-rw-r--r--clang/lib/Basic/Targets/OSTargets.h3
-rw-r--r--clang/lib/Basic/Targets/PPC.cpp3
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp7
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp2
-rw-r--r--clang/lib/CodeGen/CGVTables.cpp19
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp66
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp7
-rw-r--r--clang/lib/Driver/ToolChain.cpp3
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp13
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.cpp37
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp18
-rw-r--r--clang/lib/Headers/CMakeLists.txt2
-rw-r--r--clang/lib/Headers/avx10_2_512satcvtintrin.h301
-rw-r--r--clang/lib/Headers/avx10_2satcvtintrin.h444
-rw-r--r--clang/lib/Headers/immintrin.h2
-rw-r--r--clang/lib/Headers/ptrauth.h6
-rw-r--r--clang/lib/Parse/ParseDecl.cpp71
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp8
-rw-r--r--clang/lib/Sema/SemaBoundsSafety.cpp8
-rw-r--r--clang/lib/Sema/SemaCoroutine.cpp3
-rw-r--r--clang/lib/Sema/SemaDecl.cpp9
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp3
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp2
-rw-r--r--clang/lib/Sema/SemaExpr.cpp4
-rw-r--r--clang/lib/Sema/SemaExprMember.cpp3
-rw-r--r--clang/lib/Sema/SemaLambda.cpp4
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp97
-rw-r--r--clang/lib/Sema/SemaOverload.cpp6
-rw-r--r--clang/lib/Sema/SemaPPC.cpp3
-rw-r--r--clang/lib/Sema/SemaRISCV.cpp6
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp76
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp20
-rw-r--r--clang/lib/Sema/SemaX86.cpp16
-rw-r--r--clang/lib/Sema/TreeTransform.h70
-rw-r--r--clang/lib/Serialization/ASTReader.cpp12
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp4
-rw-r--r--clang/test/AST/Interp/arrays.cpp2
-rw-r--r--clang/test/AST/Interp/builtin-functions.cpp26
-rw-r--r--clang/test/AST/Interp/c.c7
-rw-r--r--clang/test/AST/Interp/cxx20.cpp17
-rw-r--r--clang/test/AST/Interp/objc.mm25
-rw-r--r--clang/test/AST/Interp/vectors.cpp60
-rw-r--r--clang/test/CXX/temp/temp.spec/temp.expl.spec/p14-23.cpp116
-rw-r--r--clang/test/CodeGen/PowerPC/builtins-bcd-assist.c58
-rw-r--r--clang/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.c75
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c61
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c242
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c242
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c242
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c62
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c242
-rwxr-xr-xclang/test/CodeGen/X86/avx10_2_512satcvt-builtins-error.c198
-rwxr-xr-xclang/test/CodeGen/X86/avx10_2_512satcvt-builtins.c379
-rw-r--r--clang/test/CodeGen/X86/avx10_2satcvt-builtins.c603
-rw-r--r--clang/test/CodeGen/aarch64-elf-pauthabi.c12
-rw-r--r--clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_clamp.c10
-rw-r--r--clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_max.c10
-rw-r--r--clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_maxnm.c10
-rw-r--r--clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_min.c10
-rw-r--r--clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_minnm.c10
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfadd.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfclamp.c10
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmax.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmaxnm.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmin.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfminnm.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla_lane.c10
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls_lane.c10
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul.c12
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul_lane.c10
-rw-r--r--clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfsub.c12
-rw-r--r--clang/test/CodeGen/bitfield-access-pad.c1
-rw-r--r--clang/test/CodeGen/bitfield-access-unit.c4
-rw-r--r--clang/test/CodeGen/ptrauth-init-fini.c39
-rw-r--r--clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-1.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-2.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/diamond-inheritance.cpp6
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/diamond-virtual-inheritance.cpp10
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/inheritted-virtual-function.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/inline-virtual-function.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/multiple-inheritance.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/no-alias-when-dso-local.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/override-pure-virtual-method.cpp4
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/overriden-virtual-function.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-flag.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-hwasan.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp2
-rw-r--r--clang/test/CodeGenCXX/bitfield-access-empty.cpp1
-rw-r--r--clang/test/CodeGenCXX/bitfield-access-tail.cpp1
-rw-r--r--clang/test/CodeGenCXX/temporaries.cpp41
-rw-r--r--clang/test/Driver/Inputs/DriverKit23.0.sdk/SDKSettings.json1
-rw-r--r--clang/test/Driver/Inputs/MacOSX15.0.sdk/SDKSettings.json (renamed from clang/test/Driver/Inputs/MacOSX99.0.sdk/SDKSettings.json)0
-rw-r--r--clang/test/Driver/aarch64-ptrauth.c10
-rw-r--r--clang/test/Driver/cl-cxx20-modules.cppm8
-rw-r--r--clang/test/Driver/darwin-builtin-modules.c5
-rw-r--r--clang/test/Driver/fsanitize.c2
-rw-r--r--[-rwxr-xr-x]clang/test/Driver/ftime-trace-sections.py11
-rw-r--r--clang/test/Driver/linker-wrapper-passes.c75
-rw-r--r--clang/test/Driver/print-enabled-extensions/aarch64-apple-a15.c1
-rw-r--r--clang/test/Driver/print-enabled-extensions/aarch64-apple-a16.c1
-rw-r--r--clang/test/Driver/print-enabled-extensions/aarch64-apple-a17.c1
-rw-r--r--clang/test/Driver/print-enabled-extensions/aarch64-apple-m4.c1
-rw-r--r--clang/test/Driver/print-supported-extensions-aarch64.c3
-rw-r--r--clang/test/Driver/riscv-cpus.c48
-rw-r--r--clang/test/Misc/target-invalid-cpu-note.c5
-rw-r--r--clang/test/Modules/crash-vfs-include-pch.m2
-rw-r--r--clang/test/OpenMP/target_teams_ast_print.cpp4
-rw-r--r--clang/test/OpenMP/target_teams_distribute_num_teams_messages.cpp12
-rw-r--r--clang/test/OpenMP/target_teams_distribute_parallel_for_num_teams_messages.cpp5
-rw-r--r--clang/test/OpenMP/teams_num_teams_messages.cpp7
-rw-r--r--clang/test/Preprocessor/predefined-macros-no-warnings.c2
-rw-r--r--clang/test/Preprocessor/ptrauth_feature.c29
-rw-r--r--clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_b16b16.cpp41
-rw-r--r--clang/test/Sema/aarch64-sve2p1-intrinsics/acle_sve2p1_b16b16.cpp56
-rw-r--r--clang/test/SemaCXX/constexpr-vectors-access-elements.cpp46
-rw-r--r--clang/test/SemaCXX/decltype.cpp25
-rw-r--r--clang/test/SemaCXX/fold_lambda_with_variadics.cpp181
-rw-r--r--clang/test/SemaHLSL/BuiltIns/length-errors.hlsl3
-rw-r--r--clang/test/SemaTemplate/address_space-dependent.cpp13
-rw-r--r--clang/test/TableGen/attrs-parser-string-switches.td132
-rw-r--r--clang/test/lit.cfg.py12
-rw-r--r--clang/test/lit.site.cfg.py.in4
-rw-r--r--clang/tools/clang-linker-wrapper/CMakeLists.txt3
-rw-r--r--clang/tools/clang-repl/CMakeLists.txt4
-rw-r--r--clang/tools/driver/CMakeLists.txt7
-rw-r--r--clang/tools/libclang/CIndex.cpp2
-rw-r--r--clang/unittests/AST/ASTImporterTest.cpp36
-rw-r--r--clang/unittests/Interpreter/CMakeLists.txt4
-rw-r--r--clang/unittests/Interpreter/ExceptionTests/CMakeLists.txt4
-rw-r--r--clang/utils/TableGen/ClangAttrEmitter.cpp274
-rw-r--r--clang/utils/TableGen/RISCVVEmitter.cpp1
-rw-r--r--compiler-rt/lib/asan/asan_errors.cpp2
-rw-r--r--compiler-rt/lib/builtins/crtbegin.c36
-rw-r--r--compiler-rt/lib/scudo/standalone/CMakeLists.txt1
-rw-r--r--compiler-rt/lib/scudo/standalone/benchmarks/CMakeLists.txt33
-rw-r--r--compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp105
-rw-r--r--compiler-rt/lib/scudo/standalone/combined.h6
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map_base.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/release.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h177
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/common_test.cpp4
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/map_test.cpp6
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp2
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/strings_test.cpp2
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/vector_test.cpp2
-rw-r--r--compiler-rt/lib/scudo/standalone/vector.h3
-rw-r--r--flang/include/flang/Runtime/CUDA/allocator.h7
-rw-r--r--flang/include/flang/Runtime/CUDA/descriptor.h30
-rw-r--r--flang/include/flang/Runtime/allocator-registry.h3
-rw-r--r--flang/lib/Lower/ConvertVariable.cpp5
-rw-r--r--flang/lib/Lower/DirectivesCommon.h77
-rw-r--r--flang/lib/Lower/OpenACC.cpp49
-rw-r--r--flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp6
-rw-r--r--flang/lib/Optimizer/Transforms/CufOpConversion.cpp99
-rw-r--r--flang/runtime/CUDA/CMakeLists.txt1
-rw-r--r--flang/runtime/CUDA/allocator.cpp16
-rw-r--r--flang/runtime/CUDA/descriptor.cpp28
-rw-r--r--flang/runtime/copy.cpp49
-rw-r--r--flang/test/Fir/CUDA/cuda-allocate.fir11
-rw-r--r--flang/test/Fir/box-typecode.fir5
-rw-r--r--flang/test/Fir/convert-to-llvm.fir15
-rw-r--r--flang/test/Fir/polymorphic.fir10
-rw-r--r--flang/test/Fir/tbaa.fir20
-rw-r--r--flang/test/Lower/OpenACC/acc-bounds.f9011
-rw-r--r--flang/test/Lower/allocatable-polymorphic.f9020
-rw-r--r--flang/tools/flang-driver/CMakeLists.txt16
-rw-r--r--flang/unittests/Runtime/CUDA/AllocatorCUF.cpp18
-rw-r--r--libc/cmake/modules/LLVMLibCTestRules.cmake2
-rw-r--r--libc/config/baremetal/arm/entrypoints.txt5
-rw-r--r--libc/config/baremetal/riscv/entrypoints.txt5
-rw-r--r--libc/config/darwin/arm/entrypoints.txt6
-rw-r--r--libc/config/darwin/x86_64/entrypoints.txt6
-rw-r--r--libc/config/gpu/entrypoints.txt2
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt16
-rw-r--r--libc/config/linux/api.td1
-rw-r--r--libc/config/linux/arm/entrypoints.txt7
-rw-r--r--libc/config/linux/riscv/entrypoints.txt16
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt13
-rw-r--r--libc/config/windows/entrypoints.txt9
-rw-r--r--libc/docs/dev/undefined_behavior.rst8
-rw-r--r--libc/docs/math/index.rst16
-rw-r--r--libc/docs/overlay_mode.rst2
-rw-r--r--libc/include/CMakeLists.txt1
-rw-r--r--libc/include/llvm-libc-macros/float-macros.h12
-rw-r--r--libc/include/llvm-libc-macros/limits-macros.h10
-rw-r--r--libc/include/llvm-libc-types/CMakeLists.txt1
-rw-r--r--libc/include/llvm-libc-types/pthread_spinlock_t.h17
-rw-r--r--libc/newhdrgen/yaml/math.yaml102
-rw-r--r--libc/newhdrgen/yaml/pthread.yaml27
-rw-r--r--[-rwxr-xr-x]libc/newhdrgen/yaml_to_classes.py0
-rw-r--r--libc/spec/gnu_ext.td6
-rw-r--r--libc/spec/llvm_libc_ext.td8
-rw-r--r--libc/spec/posix.td29
-rw-r--r--libc/spec/stdc.td17
-rw-r--r--libc/src/__support/threads/spin_lock.h36
-rw-r--r--libc/src/math/CMakeLists.txt16
-rw-r--r--libc/src/math/exp10f16.h21
-rw-r--r--libc/src/math/exp2f16.h21
-rw-r--r--libc/src/math/fdiv.h20
-rw-r--r--libc/src/math/fdivf128.h21
-rw-r--r--libc/src/math/fdivl.h20
-rw-r--r--libc/src/math/ffma.h20
-rw-r--r--libc/src/math/ffmaf128.h21
-rw-r--r--libc/src/math/ffmal.h20
-rw-r--r--libc/src/math/fsub.h20
-rw-r--r--libc/src/math/fsubf128.h21
-rw-r--r--libc/src/math/fsubl.h20
-rw-r--r--libc/src/math/generic/CMakeLists.txt191
-rw-r--r--libc/src/math/generic/exp10f16.cpp170
-rw-r--r--libc/src/math/generic/exp2f16.cpp127
-rw-r--r--libc/src/math/generic/expxf16.h28
-rw-r--r--libc/src/math/generic/fdiv.cpp20
-rw-r--r--libc/src/math/generic/fdivf128.cpp20
-rw-r--r--libc/src/math/generic/fdivl.cpp20
-rw-r--r--libc/src/math/generic/ffma.cpp20
-rw-r--r--libc/src/math/generic/ffmaf128.cpp20
-rw-r--r--libc/src/math/generic/ffmal.cpp21
-rw-r--r--libc/src/math/generic/fsub.cpp20
-rw-r--r--libc/src/math/generic/fsubf128.cpp20
-rw-r--r--libc/src/math/generic/fsubl.cpp20
-rw-r--r--libc/src/math/generic/getpayloadl.cpp20
-rw-r--r--libc/src/math/generic/pow.cpp68
-rw-r--r--libc/src/math/generic/remainderf128.cpp21
-rw-r--r--libc/src/math/getpayloadl.h20
-rw-r--r--libc/src/math/remainderf128.h21
-rw-r--r--libc/src/pthread/CMakeLists.txt65
-rw-r--r--libc/src/pthread/pthread_spin_destroy.cpp47
-rw-r--r--libc/src/pthread/pthread_spin_destroy.h21
-rw-r--r--libc/src/pthread/pthread_spin_init.cpp37
-rw-r--r--libc/src/pthread/pthread_spin_init.h21
-rw-r--r--libc/src/pthread/pthread_spin_lock.cpp47
-rw-r--r--libc/src/pthread/pthread_spin_lock.h21
-rw-r--r--libc/src/pthread/pthread_spin_trylock.cpp41
-rw-r--r--libc/src/pthread/pthread_spin_trylock.h22
-rw-r--r--libc/src/pthread/pthread_spin_unlock.cpp44
-rw-r--r--libc/src/pthread/pthread_spin_unlock.h21
-rw-r--r--libc/src/time/mktime.cpp18
-rw-r--r--libc/src/time/time_utils.h5
-rw-r--r--libc/test/integration/src/pthread/CMakeLists.txt22
-rw-r--r--libc/test/integration/src/pthread/pthread_rwlock_test.cpp4
-rw-r--r--libc/test/integration/src/pthread/pthread_spinlock_test.cpp145
-rw-r--r--libc/test/src/math/CMakeLists.txt145
-rw-r--r--libc/test/src/math/exp10f16_test.cpp40
-rw-r--r--libc/test/src/math/exp2f16_test.cpp40
-rw-r--r--libc/test/src/math/fdiv_test.cpp13
-rw-r--r--libc/test/src/math/fdivl_test.cpp13
-rw-r--r--libc/test/src/math/ffma_test.cpp13
-rw-r--r--libc/test/src/math/ffmal_test.cpp13
-rw-r--r--libc/test/src/math/fsub_test.cpp13
-rw-r--r--libc/test/src/math/fsubl_test.cpp13
-rw-r--r--libc/test/src/math/performance_testing/CMakeLists.txt22
-rw-r--r--libc/test/src/math/performance_testing/exp10f16_perf.cpp22
-rw-r--r--libc/test/src/math/performance_testing/exp2f16_perf.cpp22
-rw-r--r--libc/test/src/math/pow_test.cpp14
-rw-r--r--libc/test/src/math/smoke/CMakeLists.txt200
-rw-r--r--libc/test/src/math/smoke/DivTest.h2
-rw-r--r--libc/test/src/math/smoke/GetPayloadTest.h4
-rw-r--r--libc/test/src/math/smoke/SetPayloadTest.h17
-rw-r--r--libc/test/src/math/smoke/exp10f16_test.cpp65
-rw-r--r--libc/test/src/math/smoke/exp2f16_test.cpp65
-rw-r--r--libc/test/src/math/smoke/fdiv_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fdivf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fdivl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ffma_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ffmaf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ffmal_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fsub_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fsubf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fsubl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/getpayloadl_test.cpp13
-rw-r--r--libc/test/src/time/mktime_test.cpp125
-rw-r--r--libc/utils/MPFRWrapper/MPFRUtils.cpp17
-rw-r--r--libc/utils/gpu/loader/Loader.h2
-rw-r--r--libc/utils/gpu/loader/amdgpu/amdhsa-loader.cpp2
-rw-r--r--libc/utils/gpu/loader/nvptx/nvptx-loader.cpp2
-rw-r--r--libc/utils/gpu/server/CMakeLists.txt2
-rw-r--r--libc/utils/gpu/server/rpc_server.cpp3
-rw-r--r--libcxx/docs/Status/Cxx20Issues.csv2
-rw-r--r--libcxx/include/fstream43
-rw-r--r--libcxx/include/ios11
-rw-r--r--libcxx/include/sstream32
-rw-r--r--libcxx/include/string10
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/default.pass.cpp8
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/move.pass.cpp26
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/path.pass.cpp24
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/pointer.pass.cpp24
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/string.pass.cpp24
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/default.pass.cpp8
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/move.pass.cpp16
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/path.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/pointer.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/string.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/default.pass.cpp8
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/move.pass.cpp28
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/path.pass.cpp69
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp80
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp79
-rw-r--r--libcxx/test/std/input.output/iostreams.base/ios/basic.ios.members/copyfmt.pass.cpp9
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/default.pass.cpp34
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/mode.alloc.pass.cpp15
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/move.pass.cpp27
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string-alloc.mode.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.alloc.pass.cpp14
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.mode.alloc.pass.cpp14
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.move.mode.pass.cpp14
-rw-r--r--libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.pass.cpp56
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/default.pass.cpp34
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/mode.alloc.pass.cpp15
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/move.pass.cpp27
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string-alloc.mode.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.alloc.pass.cpp14
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.mode.alloc.pass.cpp16
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.move.mode.pass.cpp25
-rw-r--r--libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.pass.cpp48
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/default.pass.cpp21
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/mode.alloc.pass.cpp16
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/move.pass.cpp35
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string-alloc.mode.pass.cpp13
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.alloc.pass.cpp14
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.mode.alloc.pass.cpp15
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.move.mode.pass.cpp17
-rw-r--r--libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.pass.cpp32
-rw-r--r--libcxx/test/std/numerics/c.math/hermite.pass.cpp43
-rw-r--r--libcxx/test/std/strings/basic.string/string.cons/move_alloc.pass.cpp13
-rw-r--r--libcxx/test/std/strings/basic.string/string.cons/substr_rvalue.pass.cpp3
-rw-r--r--libcxx/test/std/strings/basic.string/string.modifiers/string_swap/swap.pass.cpp2
-rw-r--r--libcxx/test/std/time/time.cal/time.cal.ymdlast/time.cal.ymdlast.nonmembers/comparisons.pass.cpp4
-rw-r--r--libcxx/test/support/operator_hijacker.h18
-rw-r--r--lld/ELF/Arch/ARM.cpp21
-rw-r--r--lld/ELF/Config.h3
-rw-r--r--lld/ELF/InputFiles.cpp6
-rw-r--r--lld/ELF/Writer.cpp10
-rw-r--r--lld/MachO/Driver.cpp98
-rw-r--r--lld/cmake/modules/AddLLD.cmake2
-rw-r--r--lld/test/ELF/arm-mixed-plts.s44
-rw-r--r--lld/test/ELF/hip-section-layout.s39
-rw-r--r--lld/test/MachO/lto-object-path.ll6
-rw-r--r--lld/test/MachO/objc-category-conflicts.s19
-rw-r--r--lld/test/MachO/objc-category-merging-complete-test.s13
-rw-r--r--lld/test/MachO/objc-category-merging-erase-objc-name-test.s5
-rw-r--r--lld/test/MachO/objc-category-merging-minimal.s9
-rw-r--r--lld/test/MachO/objc-relative-method-lists-simple.s16
-rw-r--r--lld/test/wasm/lto/stub-library.s21
-rw-r--r--lld/tools/lld/CMakeLists.txt2
-rw-r--r--lld/wasm/Driver.cpp11
-rw-r--r--lld/wasm/InputFiles.cpp4
-rw-r--r--lldb/include/lldb/Host/Config.h.cmake2
-rw-r--r--lldb/include/lldb/Target/Process.h28
-rw-r--r--lldb/include/lldb/Target/StopInfo.h4
-rw-r--r--lldb/include/lldb/Target/Thread.h2
-rw-r--r--lldb/include/lldb/Target/ThreadPlan.h12
-rw-r--r--lldb/include/lldb/Target/ThreadPlanSingleThreadTimeout.h110
-rw-r--r--lldb/include/lldb/Target/ThreadPlanStepOut.h1
-rw-r--r--lldb/include/lldb/Target/ThreadPlanStepOverRange.h7
-rw-r--r--lldb/include/lldb/Target/ThreadPlanStepRange.h7
-rw-r--r--lldb/include/lldb/Target/TimeoutResumeAll.h43
-rw-r--r--lldb/include/lldb/Utility/AddressableBits.h2
-rw-r--r--lldb/include/lldb/lldb-enumerations.h1
-rw-r--r--lldb/packages/Python/lldbsuite/test/decorators.py4
-rw-r--r--lldb/packages/Python/lldbsuite/test/make/Makefile.rules102
-rw-r--r--lldb/source/API/SBDebugger.cpp13
-rw-r--r--lldb/source/API/SBStructuredData.cpp7
-rw-r--r--lldb/source/API/SBThread.cpp6
-rw-r--r--lldb/source/Core/Progress.cpp3
-rw-r--r--lldb/source/Interpreter/CommandInterpreter.cpp2
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp11
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h2
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp4
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangUserExpression.cpp7
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp8
-rw-r--r--lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp2
-rw-r--r--lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp61
-rw-r--r--lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h4
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp39
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp38
-rw-r--r--lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp2
-rw-r--r--lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp2
-rw-r--r--lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp2
-rw-r--r--lldb/source/Plugins/SymbolLocator/CMakeLists.txt7
-rw-r--r--lldb/source/Plugins/SymbolVendor/ELF/SymbolVendorELF.cpp34
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp68
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h41
-rw-r--r--lldb/source/Target/CMakeLists.txt1
-rw-r--r--lldb/source/Target/Process.cpp21
-rw-r--r--lldb/source/Target/StopInfo.cpp28
-rw-r--r--lldb/source/Target/TargetProperties.td4
-rw-r--r--lldb/source/Target/Thread.cpp15
-rw-r--r--lldb/source/Target/ThreadPlan.cpp1
-rw-r--r--lldb/source/Target/ThreadPlanSingleThreadTimeout.cpp250
-rw-r--r--lldb/source/Target/ThreadPlanStepInRange.cpp1
-rw-r--r--lldb/source/Target/ThreadPlanStepOverRange.cpp21
-rw-r--r--lldb/source/Target/ThreadPlanStepRange.cpp71
-rw-r--r--lldb/test/API/debuginfod/Normal/Makefile19
-rw-r--r--lldb/test/API/debuginfod/Normal/TestDebuginfod.py186
-rw-r--r--lldb/test/API/debuginfod/Normal/main.c7
-rw-r--r--lldb/test/API/debuginfod/SplitDWARF/Makefile23
-rw-r--r--lldb/test/API/debuginfod/SplitDWARF/TestDebuginfodDWP.py196
-rw-r--r--lldb/test/API/debuginfod/SplitDWARF/main.c7
-rw-r--r--lldb/test/API/functionalities/single-thread-step/Makefile4
-rw-r--r--lldb/test/API/functionalities/single-thread-step/TestSingleThreadStepTimeout.py254
-rw-r--r--lldb/test/API/functionalities/single-thread-step/main.cpp68
-rw-r--r--lldb/test/API/python_api/sbstructureddata/TestStructuredDataAPI.py31
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.cpp3
-rw-r--r--lldb/tools/lldb-dap/LLDBUtils.cpp1
-rw-r--r--lldb/unittests/Core/ProgressReportTest.cpp75
-rw-r--r--lldb/unittests/Symbol/TestClangASTImporter.cpp6
-rw-r--r--lldb/unittests/Symbol/TestTypeSystemClang.cpp14
-rw-r--r--llvm/CMakeLists.txt4
-rw-r--r--llvm/cmake/modules/AddLLVM.cmake13
-rw-r--r--llvm/docs/NVPTXUsage.rst35
-rw-r--r--llvm/docs/ReleaseNotes.rst1
-rw-r--r--llvm/examples/ExceptionDemo/CMakeLists.txt4
-rw-r--r--llvm/examples/HowToUseLLJIT/CMakeLists.txt4
-rw-r--r--llvm/examples/Kaleidoscope/BuildingAJIT/Chapter1/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/BuildingAJIT/Chapter2/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/BuildingAJIT/Chapter3/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/BuildingAJIT/Chapter4/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter4/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter5/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter6/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter7/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter8/CMakeLists.txt2
-rw-r--r--llvm/examples/Kaleidoscope/Chapter9/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITDumpObjects/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITRemovableCode/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithCustomObjectLinkingLayer/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithExecutorProcessControl/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithGDBRegistrationListener/CMakeLists.txt4
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithInitializers/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithLazyReexports/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithObjectCache/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithObjectLinkingLayerPlugin/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithOptimizingIRTransform/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithRemoteDebugging/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/LLJITWithThinLTOSummaries/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsAddObjectFile/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsBasicUsage/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsDumpObjects/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsIRTransforms/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsLazy/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsRemovableCode/CMakeLists.txt2
-rw-r--r--llvm/examples/OrcV2Examples/OrcV2CBindingsVeryLazy/CMakeLists.txt2
-rw-r--r--llvm/include/llvm/ADT/GraphTraits.h25
-rw-r--r--llvm/include/llvm/ADT/STLExtras.h2
-rw-r--r--llvm/include/llvm/Analysis/CtxProfAnalysis.h70
-rw-r--r--llvm/include/llvm/Analysis/RegionInfoImpl.h3
-rw-r--r--llvm/include/llvm/Analysis/TargetLibraryInfo.def26
-rw-r--r--llvm/include/llvm/BinaryFormat/COFF.h15
-rw-r--r--llvm/include/llvm/BinaryFormat/ELF.h4
-rw-r--r--llvm/include/llvm/CodeGen/BasicTTIImpl.h103
-rw-r--r--llvm/include/llvm/CodeGen/ExpandVectorPredication.h13
-rw-r--r--llvm/include/llvm/CodeGen/FunctionLoweringInfo.h13
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h5
-rw-r--r--llvm/include/llvm/CodeGen/MachineBasicBlock.h42
-rw-r--r--llvm/include/llvm/CodeGen/MachineFunction.h37
-rw-r--r--llvm/include/llvm/CodeGen/MachineRegisterInfo.h11
-rw-r--r--llvm/include/llvm/CodeGen/Passes.h5
-rw-r--r--llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h4
-rw-r--r--llvm/include/llvm/CodeGen/SDPatternMatch.h55
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h5
-rw-r--r--llvm/include/llvm/CodeGen/TargetFrameLowering.h10
-rw-r--r--llvm/include/llvm/CodeGen/TargetRegisterInfo.h6
-rw-r--r--llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h62
-rw-r--r--llvm/include/llvm/IR/Constants.h13
-rw-r--r--llvm/include/llvm/IR/IntrinsicsNVVM.td14
-rw-r--r--llvm/include/llvm/IR/IntrinsicsPowerPC.td13
-rw-r--r--llvm/include/llvm/IR/IntrinsicsX86.td154
-rw-r--r--llvm/include/llvm/IR/Module.h7
-rw-r--r--llvm/include/llvm/LinkAllPasses.h1
-rw-r--r--llvm/include/llvm/MC/MCELFObjectWriter.h2
-rw-r--r--llvm/include/llvm/MCA/Stages/InOrderIssueStage.h6
-rw-r--r--llvm/include/llvm/Object/COFF.h99
-rw-r--r--llvm/include/llvm/Passes/CodeGenPassBuilder.h2
-rw-r--r--llvm/include/llvm/Passes/MachinePassRegistry.def1
-rw-r--r--llvm/include/llvm/ProfileData/PGOCtxProfReader.h35
-rw-r--r--llvm/include/llvm/SandboxIR/SandboxIR.h146
-rw-r--r--llvm/include/llvm/SandboxIR/SandboxIRValues.def4
-rw-r--r--llvm/include/llvm/SandboxIR/Tracker.h52
-rw-r--r--llvm/include/llvm/Support/GenericDomTree.h135
-rw-r--r--llvm/include/llvm/Support/GenericDomTreeConstruction.h78
-rw-r--r--llvm/include/llvm/Support/Windows/WindowsSupport.h2
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td2
-rw-r--r--llvm/include/llvm/TargetParser/Triple.h2
-rw-r--r--llvm/lib/Analysis/BasicAliasAnalysis.cpp10
-rw-r--r--llvm/lib/Analysis/CMakeLists.txt1
-rw-r--r--llvm/lib/Analysis/CtxProfAnalysis.cpp95
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp4
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp4
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp20
-rw-r--r--llvm/lib/CodeGen/BasicBlockSections.cpp11
-rw-r--r--llvm/lib/CodeGen/CFIInstrInserter.cpp33
-rw-r--r--llvm/lib/CodeGen/ExpandVectorPredication.cpp138
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp5
-rw-r--r--llvm/lib/CodeGen/IntrinsicLowering.cpp2
-rw-r--r--llvm/lib/CodeGen/MIRSampleProfile.cpp19
-rw-r--r--llvm/lib/CodeGen/MachineBlockPlacement.cpp1
-rw-r--r--llvm/lib/CodeGen/MachineFunction.cpp1
-rw-r--r--llvm/lib/CodeGen/MachinePipeliner.cpp11
-rw-r--r--llvm/lib/CodeGen/MachineRegisterInfo.cpp2
-rw-r--r--llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp25
-rw-r--r--llvm/lib/CodeGen/PrologEpilogInserter.cpp8
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp97
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp3
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp37
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp7
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp5
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp9
-rw-r--r--llvm/lib/CodeGen/TargetPassConfig.cpp5
-rw-r--r--llvm/lib/CodeGen/TargetRegisterInfo.cpp9
-rw-r--r--llvm/lib/CodeGen/UnreachableBlockElim.cpp2
-rw-r--r--llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp6
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp87
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp2
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/aarch64.cpp4
-rw-r--r--llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp2
-rw-r--r--llvm/lib/IR/AutoUpgrade.cpp20
-rw-r--r--llvm/lib/IR/ConstantRange.cpp104
-rw-r--r--llvm/lib/IR/Constants.cpp12
-rw-r--r--llvm/lib/IR/DebugProgramInstruction.cpp2
-rw-r--r--llvm/lib/IR/LegacyPassManager.cpp4
-rw-r--r--llvm/lib/IR/Module.cpp50
-rw-r--r--llvm/lib/IR/Verifier.cpp8
-rw-r--r--llvm/lib/MC/ELFObjectWriter.cpp2
-rw-r--r--llvm/lib/MC/MCELFObjectTargetWriter.cpp3
-rw-r--r--llvm/lib/MCA/Stages/InOrderIssueStage.cpp2
-rw-r--r--llvm/lib/Object/COFFObjectFile.cpp340
-rw-r--r--llvm/lib/Object/ELFObjectFile.cpp2
-rw-r--r--llvm/lib/Passes/CMakeLists.txt1
-rw-r--r--llvm/lib/Passes/PassBuilder.cpp3
-rw-r--r--llvm/lib/Passes/PassBuilderPipelines.cpp2
-rw-r--r--llvm/lib/Passes/PassRegistry.def4
-rw-r--r--llvm/lib/Passes/StandardInstrumentations.cpp9
-rw-r--r--llvm/lib/ProfileData/PGOCtxProfReader.cpp20
-rw-r--r--llvm/lib/SandboxIR/SandboxIR.cpp146
-rw-r--r--llvm/lib/SandboxIR/Tracker.cpp40
-rw-r--r--llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp73
-rw-r--r--llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp4
-rw-r--r--llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp33
-rw-r--r--llvm/lib/Target/AArch64/AArch64FastISel.cpp3
-rw-r--r--llvm/lib/Target/AArch64/AArch64Features.td9
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.cpp204
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.h6
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp6
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td9
-rw-r--r--llvm/lib/Target/AArch64/AArch64MCInstLower.cpp10
-rw-r--r--llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp25
-rw-r--r--llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h10
-rw-r--r--llvm/lib/Target/AArch64/AArch64Processors.td20
-rw-r--r--llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td10
-rw-r--r--llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td21
-rw-r--r--llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp33
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp4
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp10
-rw-r--r--llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp34
-rw-r--r--llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp5
-rw-r--r--llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h5
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp164
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp21
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td4
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp127
-rw-r--r--llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp81
-rw-r--r--llvm/lib/Target/ARM/ARMConstantIslandPass.cpp10
-rw-r--r--llvm/lib/Target/ARM/ARMInstrThumb2.td1
-rw-r--r--llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp24
-rw-r--r--llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp21
-rw-r--r--llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp20
-rw-r--r--llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp6
-rw-r--r--llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp3
-rw-r--r--llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp26
-rw-r--r--llvm/lib/Target/NVPTX/NVPTX.h29
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp454
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h3
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.td1
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXIntrinsics.td42
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXUtilities.h31
-rw-r--r--llvm/lib/Target/PowerPC/PPCFrameLowering.cpp14
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstr64Bit.td10
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.td10
-rw-r--r--llvm/lib/Target/RISCV/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/RISCV/RISCV.h3
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp7
-rw-r--r--llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp102
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td23
-rw-r--r--llvm/lib/Target/RISCV/RISCVProcessors.td31
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetMachine.cpp1
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp4
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp7
-rw-r--r--llvm/lib/Target/X86/X86DomainReassignment.cpp12
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.cpp320
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.h25
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp8
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h9
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX10.td173
-rw-r--r--llvm/lib/Target/X86/X86InstrFragmentsSIMD.td14
-rw-r--r--llvm/lib/Target/X86/X86InstrUtils.td6
-rw-r--r--llvm/lib/Target/X86/X86IntrinsicsInfo.h72
-rw-r--r--llvm/lib/Target/X86/X86RegisterInfo.cpp55
-rw-r--r--llvm/lib/TargetParser/Triple.cpp21
-rw-r--r--llvm/lib/Transforms/IPO/AttributorAttributes.cpp54
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp57
-rw-r--r--llvm/lib/Transforms/Scalar/LICM.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/Local.cpp4
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp253
-rw-r--r--llvm/test/Analysis/BasicAA/nusw_nuw_nonneg.ll20
-rw-r--r--llvm/test/Analysis/BasicAA/struct-geps.ll11
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll8
-rw-r--r--llvm/test/Analysis/CostModel/ARM/arith-ssat.ll144
-rw-r--r--llvm/test/Analysis/CostModel/ARM/arith-usat.ll132
-rw-r--r--llvm/test/Analysis/CtxProfAnalysis/load.ll58
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/AMDGPU/always_uniform.ll33
-rw-r--r--llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-exchange-fence.ll64
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir29
-rw-r--r--llvm/test/CodeGen/AArch64/O0-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/AArch64/O3-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/AArch64/abds.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/abdu.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/emutls_alias.ll17
-rw-r--r--llvm/test/CodeGen/AArch64/note-gnu-property-elf-pauthabi.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-basic-pic.ll82
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-elf-globals-pic.ll23
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-extern-weak.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-got-abuse.ll44
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-init-fini.ll104
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-tagged-globals-pic.ll66
-rw-r--r--llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sms-order-physreg-deps.mir452
-rw-r--r--llvm/test/CodeGen/AArch64/srem-seteq-vec-splat.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll152
-rw-r--r--llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll28
-rw-r--r--llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll28
-rw-r--r--llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfclamp.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla_lane.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls_lane.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul_lane.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll37
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll7
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-copy.mir298
-rw-r--r--llvm/test/CodeGen/AMDGPU/addrspacecast.ll128
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir1541
-rw-r--r--llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-u32.mir1277
-rw-r--r--llvm/test/CodeGen/AMDGPU/indirect-call-set-from-other-function.ll73
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll56
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-sbuffer-load.mir725
-rw-r--r--llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll15
-rw-r--r--llvm/test/CodeGen/ARM/O3-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/ARM/setjmp-bti-basic.ll76
-rw-r--r--llvm/test/CodeGen/BPF/objdump_atomics.ll2
-rw-r--r--llvm/test/CodeGen/BPF/objdump_cond_op.ll2
-rw-r--r--llvm/test/CodeGen/BPF/objdump_imm_hex.ll4
-rw-r--r--llvm/test/CodeGen/BPF/objdump_static_var.ll4
-rw-r--r--llvm/test/CodeGen/LoongArch/O0-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/LoongArch/opt-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/M68k/pipeline.ll1
-rw-r--r--llvm/test/CodeGen/Mips/llvm-ir/and-srl.ll28
-rw-r--r--llvm/test/CodeGen/NVPTX/fence-proxy-tensormap.ll36
-rw-r--r--llvm/test/CodeGen/NVPTX/load-store-sm-70.ll1188
-rw-r--r--llvm/test/CodeGen/NVPTX/load-store.ll823
-rw-r--r--llvm/test/CodeGen/PowerPC/O0-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/O3-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-base-pointer.ll5
-rw-r--r--llvm/test/CodeGen/PowerPC/builtins-bcd-assist.ll111
-rw-r--r--llvm/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.ll79
-rw-r--r--llvm/test/CodeGen/PowerPC/common-chain.ll313
-rw-r--r--llvm/test/CodeGen/RISCV/O0-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/O3-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/jumptable-swguarded.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/lpad.ll101
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcompress.ll137
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-select.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vrgather.ll786
-rw-r--r--llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll5
-rw-r--r--llvm/test/CodeGen/WebAssembly/offset.ll20
-rw-r--r--llvm/test/CodeGen/X86/O0-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/X86/apx/and.ll64
-rw-r--r--llvm/test/CodeGen/X86/apx/cmov.ll22
-rw-r--r--llvm/test/CodeGen/X86/apx/mul-i1024.ll916
-rw-r--r--llvm/test/CodeGen/X86/apx/or.ll64
-rw-r--r--llvm/test/CodeGen/X86/apx/push2-pop2-vector-register.ll4
-rw-r--r--llvm/test/CodeGen/X86/apx/push2-pop2.ll24
-rw-r--r--llvm/test/CodeGen/X86/apx/pushp-popp.ll4
-rw-r--r--llvm/test/CodeGen/X86/apx/shift-eflags.ll28
-rw-r--r--llvm/test/CodeGen/X86/apx/sub.ll96
-rw-r--r--llvm/test/CodeGen/X86/apx/xor.ll80
-rw-r--r--llvm/test/CodeGen/X86/avx10_2_512satcvt-intrinsics.ll1003
-rw-r--r--llvm/test/CodeGen/X86/avx10_2satcvt-intrinsics.ll1618
-rw-r--r--llvm/test/CodeGen/X86/avx512-intel-ocl.ll8
-rw-r--r--llvm/test/CodeGen/X86/clobber_base_ptr.ll118
-rw-r--r--llvm/test/CodeGen/X86/clobber_frame_ptr.ll159
-rw-r--r--llvm/test/CodeGen/X86/clobber_frame_ptr_x32.ll53
-rw-r--r--llvm/test/CodeGen/X86/cmp.ll52
-rw-r--r--llvm/test/CodeGen/X86/combine-srem.ll40
-rw-r--r--llvm/test/CodeGen/X86/i386-baseptr.ll4
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-function-call-pic.ll4
-rw-r--r--llvm/test/CodeGen/X86/opt-pipeline.ll1
-rw-r--r--llvm/test/CodeGen/X86/popcnt.ll216
-rw-r--r--llvm/test/CodeGen/X86/select_const_i128.ll4
-rw-r--r--llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll6
-rw-r--r--llvm/test/CodeGen/X86/x86-32-intrcc.ll4
-rw-r--r--llvm/test/CodeGen/X86/x86-64-baseptr.ll12
-rw-r--r--llvm/test/CodeGen/X86/x86-64-flags-intrinsics.ll8
-rw-r--r--llvm/test/DebugInfo/Generic/assignment-tracking/dse/dse-after-memcpyopt-merge.ll6
-rw-r--r--llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten-offset.ll8
-rw-r--r--llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten.ll12
-rw-r--r--llvm/test/DebugInfo/Generic/assignment-tracking/salvage-value.ll4
-rw-r--r--llvm/test/DebugInfo/Generic/sroa-extract-bits.ll8
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/ELF_relocations.s14
-rw-r--r--llvm/test/MC/AArch64/SME2/bfclamp-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SME2/bfclamp.s28
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmax-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmax.s44
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmaxnm-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmaxnm.s44
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmin-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SME2/bfmin.s44
-rw-r--r--llvm/test/MC/AArch64/SME2/bfminnm-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SME2/bfminnm.s44
-rw-r--r--llvm/test/MC/AArch64/SME2p1/directive-arch-negative.s6
-rw-r--r--llvm/test/MC/AArch64/SME2p1/directive-arch.s4
-rw-r--r--llvm/test/MC/AArch64/SME2p1/directive-arch_extension-negative.s7
-rw-r--r--llvm/test/MC/AArch64/SME2p1/directive-arch_extension.s5
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfadd-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfadd.s48
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfclamp-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfclamp.s38
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmax-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmax.s40
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmaxnm-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmaxnm.s40
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmin-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmin.s40
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfminnm-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfminnm.s40
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmla-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmla.s50
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmls-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmls.s50
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmul-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfmul.s56
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfsub-diagnostics.s2
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/bfsub.s48
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/directive-arch-negative.s6
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/directive-arch.s4
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/directive-arch_extension-negative.s7
-rw-r--r--llvm/test/MC/AArch64/SVE2p1/directive-arch_extension.s5
-rw-r--r--llvm/test/MC/AArch64/adrp-auth-relocation.s12
-rw-r--r--llvm/test/MC/AArch64/arm64-elf-relocs.s20
-rw-r--r--llvm/test/MC/AArch64/ilp32-diagnostics.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx10_asm_vop1.s9
-rw-r--r--llvm/test/MC/AMDGPU/gfx10_err_pos.s32
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_smem.s12
-rw-r--r--llvm/test/MC/ARM/Windows/branch-reloc-offset.s38
-rw-r--r--llvm/test/MC/BPF/insn-unit.s14
-rw-r--r--llvm/test/MC/BPF/load-store-32.s2
-rw-r--r--llvm/test/MC/Disassembler/X86/avx10.2-satcvt-32.txt1363
-rw-r--r--llvm/test/MC/Disassembler/X86/avx10.2-satcvt-64.txt1363
-rw-r--r--llvm/test/MC/X86/avx10.2satcvt-32-att.s1362
-rw-r--r--llvm/test/MC/X86/avx10.2satcvt-32-intel.s1362
-rw-r--r--llvm/test/MC/X86/avx10.2satcvt-64-att.s1362
-rw-r--r--llvm/test/MC/X86/avx10.2satcvt-64-intel.s1362
-rw-r--r--llvm/test/TableGen/SubtargetFeatureUniqueNames.td15
-rw-r--r--llvm/test/TableGen/x86-fold-tables.inc216
-rw-r--r--llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll22
-rw-r--r--llvm/test/Transforms/Attributor/value-simplify-gpu.ll29
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/shl.ll18
-rw-r--r--llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/debuginfo.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/select-icmp-and.ll86
-rw-r--r--llvm/test/Transforms/InstSimplify/insertelement.ll8
-rwxr-xr-xllvm/test/Transforms/InstSimplify/select-icmp.ll246
-rw-r--r--llvm/test/Transforms/LICM/hoist-binop.ll29
-rw-r--r--llvm/test/Transforms/LoopIdiom/basic.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll86
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction.ll52
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll16
-rw-r--r--llvm/test/Transforms/Mem2Reg/dbg_declare_to_value_conversions.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/debug-alloca-vla-2.ll2
-rw-r--r--llvm/test/Transforms/OpenMP/barrier_removal.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/varargs.ll3
-rw-r--r--llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-fp-intrinsics.ll (renamed from llvm/test/CodeGen/Generic/expand-vp-fp-intrinsics.ll)2
-rw-r--r--llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-gather-scatter.ll (renamed from llvm/test/CodeGen/Generic/expand-vp-gather-scatter.ll)2
-rw-r--r--llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-load-store.ll (renamed from llvm/test/CodeGen/Generic/expand-vp-load-store.ll)4
-rw-r--r--llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp.ll (renamed from llvm/test/CodeGen/Generic/expand-vp.ll)14
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll635
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-use-ptr.ll17
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/PR32086.ll12
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/addsub.ll96
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cmp-diff-sized.ll30
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/extractelement-phi-in-landingpad.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/fmuladd.ll25
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/landing_pad.ll19
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reduced-value-replace-extractelement.ll34
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll3
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/stores_mix_sizes.ll10
-rw-r--r--llvm/test/Transforms/SLPVectorizer/revec.ll36
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll187
-rw-r--r--llvm/test/Transforms/StructurizeCFG/loop-break-phi.ll213
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/arm64x-reloc-invalid.yaml1618
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/arm64x-reloc.yaml322
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s100
-rw-r--r--llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml11
-rw-r--r--llvm/test/tools/yaml2obj/COFF/load-config.yaml16
-rw-r--r--llvm/tools/bugpoint/CMakeLists.txt2
-rw-r--r--llvm/tools/llc/CMakeLists.txt3
-rw-r--r--llvm/tools/llc/llc.cpp1
-rw-r--r--llvm/tools/lli/CMakeLists.txt4
-rw-r--r--llvm/tools/lli/ChildTarget/CMakeLists.txt4
-rw-r--r--llvm/tools/llvm-jitlink/CMakeLists.txt4
-rw-r--r--llvm/tools/llvm-jitlink/llvm-jitlink-executor/CMakeLists.txt4
-rw-r--r--llvm/tools/llvm-lto2/CMakeLists.txt3
-rw-r--r--llvm/tools/llvm-readobj/COFFDumper.cpp37
-rw-r--r--llvm/tools/llvm-readobj/ELFDumper.cpp5
-rw-r--r--llvm/tools/llvm-readobj/ObjDumper.cpp30
-rw-r--r--llvm/tools/llvm-reduce/ReducerWorkItem.cpp29
-rw-r--r--llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp1
-rw-r--r--llvm/tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp4
-rw-r--r--llvm/tools/opt/CMakeLists.txt3
-rw-r--r--llvm/tools/opt/optdriver.cpp1
-rw-r--r--llvm/unittests/Analysis/CMakeLists.txt11
-rw-r--r--llvm/unittests/Analysis/TargetLibraryInfoTest.cpp4
-rw-r--r--llvm/unittests/BinaryFormat/MachOTest.cpp11
-rw-r--r--llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt4
-rw-r--r--llvm/unittests/IR/ConstantRangeTest.cpp39
-rw-r--r--llvm/unittests/Passes/Plugins/CMakeLists.txt3
-rw-r--r--llvm/unittests/ProfileData/PGOCtxProfReaderWriterTest.cpp2
-rw-r--r--llvm/unittests/SandboxIR/SandboxIRTest.cpp200
-rw-r--r--llvm/unittests/SandboxIR/TrackerTest.cpp54
-rw-r--r--llvm/unittests/Support/CMakeLists.txt1
-rw-r--r--llvm/unittests/Support/DynamicLibrary/CMakeLists.txt7
-rw-r--r--llvm/unittests/Support/GenericDomTreeTest.cpp109
-rw-r--r--llvm/unittests/TargetParser/TargetParserTest.cpp23
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp5
-rw-r--r--llvm/utils/TableGen/RegisterInfoEmitter.cpp16
-rw-r--r--llvm/utils/TableGen/SubtargetEmitter.cpp19
-rwxr-xr-xllvm/utils/extract_symbols.py7
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/clang/test/BUILD.gn23
-rw-r--r--llvm/utils/gn/secondary/lldb/include/lldb/Host/BUILD.gn7
-rw-r--r--llvm/utils/gn/secondary/lldb/source/Target/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/RISCV/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn1
-rw-r--r--mlir/docs/DefiningDialects/AttributesAndTypes.md4
-rw-r--r--mlir/docs/Dialects/Vector.md63
-rw-r--r--mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td20
-rw-r--r--mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td2
-rw-r--r--mlir/include/mlir/Pass/PassRegistry.h3
-rw-r--r--mlir/include/mlir/Target/LLVMIR/ModuleImport.h4
-rw-r--r--mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h10
-rw-r--r--mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp8
-rw-r--r--mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp49
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp9
-rw-r--r--mlir/lib/Dialect/GPU/CMakeLists.txt2
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/DecomposeMemRefs.cpp (renamed from mlir/lib/Dialect/GPU/Transforms/DecomposeMemrefs.cpp)2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp13
-rw-r--r--mlir/lib/Pass/PassRegistry.cpp26
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleImport.cpp28
-rw-r--r--mlir/lib/Tools/mlir-opt/MlirOptMain.cpp16
-rw-r--r--mlir/lib/Transforms/Utils/RegionUtils.cpp212
-rw-r--r--mlir/python/requirements.txt4
-rw-r--r--mlir/test/Dialect/ArmSME/outer-product-fusion.mlir32
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir20
-rw-r--r--mlir/test/Dialect/LLVMIR/types.mlir4
-rw-r--r--mlir/test/Dialect/Linalg/detensorize_entry_block.mlir6
-rw-r--r--mlir/test/Dialect/Linalg/detensorize_if.mlir67
-rw-r--r--mlir/test/Dialect/Linalg/detensorize_while.mlir12
-rw-r--r--mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir12
-rw-r--r--mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir4
-rw-r--r--mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir14
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul-mixed-types.mlir6
-rw-r--r--mlir/test/Integration/Dialect/MemRef/cast-runtime-verification.mlir (renamed from mlir/test/Integration/Dialect/Memref/cast-runtime-verification.mlir)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/load-runtime-verification.mlir (renamed from mlir/test/Integration/Dialect/Memref/load-runtime-verification.mlir)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/memref_abi.c (renamed from mlir/test/Integration/Dialect/Memref/memref_abi.c)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/print-memref.mlir (renamed from mlir/test/Integration/Dialect/Memref/print-memref.mlir)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/reinterpret-cast-runtime-verification.mlir (renamed from mlir/test/Integration/Dialect/Memref/reinterpret-cast-runtime-verification.mlir)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/subview-runtime-verification.mlir (renamed from mlir/test/Integration/Dialect/Memref/subview-runtime-verification.mlir)0
-rw-r--r--mlir/test/Integration/Dialect/MemRef/verify-memref.mlir (renamed from mlir/test/Integration/Dialect/Memref/verify-memref.mlir)0
-rw-r--r--mlir/test/Target/LLVMIR/Import/global-variables.ll19
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-types.mlir4
-rw-r--r--mlir/test/Transforms/canonicalize-block-merge.mlir6
-rw-r--r--mlir/test/Transforms/canonicalize-dce.mlir8
-rw-r--r--mlir/test/Transforms/make-isolated-from-above.mlir18
-rw-r--r--mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir192
-rw-r--r--mlir/tools/mlir-cpu-runner/CMakeLists.txt4
-rw-r--r--mlir/tools/mlir-opt/CMakeLists.txt2
-rw-r--r--openmp/runtime/src/kmp_os.h4
-rw-r--r--openmp/runtime/src/kmp_platform.h8
-rw-r--r--openmp/runtime/src/z_Linux_asm.S20
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel1
985 files changed, 42891 insertions, 7049 deletions
diff --git a/bolt/CMakeLists.txt b/bolt/CMakeLists.txt
index 9f5875d..9ac196a 100644
--- a/bolt/CMakeLists.txt
+++ b/bolt/CMakeLists.txt
@@ -82,7 +82,7 @@ endforeach()
set(BOLT_ENABLE_RUNTIME_default OFF)
if ((CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64"
- OR CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
+ OR CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64|aarch64)$")
AND (CMAKE_SYSTEM_NAME STREQUAL "Linux"
OR CMAKE_SYSTEM_NAME STREQUAL "Darwin")
AND (NOT CMAKE_CROSSCOMPILING))
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 5fb32a1..d2c1709 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -911,7 +911,8 @@ public:
/// of \p Flags.
MCSymbol *registerNameAtAddress(StringRef Name, uint64_t Address,
uint64_t Size, uint16_t Alignment,
- unsigned Flags = 0);
+ unsigned Flags = 0,
+ BinarySection *Section = NULL);
/// Return BinaryData registered at a given \p Address or nullptr if no
/// global symbol was registered at the location.
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index e86075e..ebaa24e 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -142,7 +142,6 @@ BinaryContext::BinaryContext(std::unique_ptr<MCContext> Ctx,
InstPrinter(std::move(InstPrinter)), MIA(std::move(MIA)),
MIB(std::move(MIB)), MRI(std::move(MRI)), DisAsm(std::move(DisAsm)),
Logger(Logger), InitialDynoStats(isAArch64()) {
- Relocation::Arch = this->TheTriple->getArch();
RegularPageSize = isAArch64() ? RegularPageSizeAArch64 : RegularPageSizeX86;
PageAlign = opts::NoHugePages ? RegularPageSize : HugePageSize;
}
@@ -1056,18 +1055,28 @@ void BinaryContext::adjustCodePadding() {
MCSymbol *BinaryContext::registerNameAtAddress(StringRef Name, uint64_t Address,
uint64_t Size,
uint16_t Alignment,
- unsigned Flags) {
+ unsigned Flags,
+ BinarySection *Section) {
// Register the name with MCContext.
MCSymbol *Symbol = Ctx->getOrCreateSymbol(Name);
+ BinaryData *BD;
+
+ // Register out of section symbols only in GlobalSymbols map
+ if (Section && Section->getEndAddress() == Address) {
+ BD = new BinaryData(*Symbol, Address, Size, Alignment ? Alignment : 1,
+ *Section, Flags);
+ GlobalSymbols[Name] = BD;
+ return Symbol;
+ }
auto GAI = BinaryDataMap.find(Address);
- BinaryData *BD;
if (GAI == BinaryDataMap.end()) {
ErrorOr<BinarySection &> SectionOrErr = getSectionForAddress(Address);
- BinarySection &Section =
- SectionOrErr ? SectionOrErr.get() : absoluteSection();
+ BinarySection &SectionRef = Section ? *Section
+ : SectionOrErr ? SectionOrErr.get()
+ : absoluteSection();
BD = new BinaryData(*Symbol, Address, Size, Alignment ? Alignment : 1,
- Section, Flags);
+ SectionRef, Flags);
GAI = BinaryDataMap.emplace(Address, BD).first;
GlobalSymbols[Name] = BD;
updateObjectNesting(GAI);
@@ -1402,7 +1411,7 @@ void BinaryContext::postProcessSymbolTable() {
if ((BD->getName().starts_with("SYMBOLat") ||
BD->getName().starts_with("DATAat")) &&
!BD->getParent() && !BD->getSize() && !BD->isAbsolute() &&
- BD->getSection()) {
+ BD->getSection().getSize()) {
this->errs() << "BOLT-WARNING: zero-sized top level symbol: " << *BD
<< "\n";
Valid = false;
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index ea09371..e5c6d55 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -2502,7 +2502,10 @@ void BinaryFunction::annotateCFIState() {
}
}
- assert(StateStack.empty() && "corrupt CFI stack");
+ if (!StateStack.empty()) {
+ BC.errs() << "BOLT-WARNING: non-empty CFI stack at the end of " << *this
+ << '\n';
+ }
}
namespace {
diff --git a/bolt/lib/Rewrite/MachORewriteInstance.cpp b/bolt/lib/Rewrite/MachORewriteInstance.cpp
index 172cb64..c328232 100644
--- a/bolt/lib/Rewrite/MachORewriteInstance.cpp
+++ b/bolt/lib/Rewrite/MachORewriteInstance.cpp
@@ -72,6 +72,7 @@ MachORewriteInstance::MachORewriteInstance(object::MachOObjectFile *InputFile,
StringRef ToolPath, Error &Err)
: InputFile(InputFile), ToolPath(ToolPath) {
ErrorAsOutParameter EAO(&Err);
+ Relocation::Arch = InputFile->makeTriple().getArch();
auto BCOrErr = BinaryContext::createBinaryContext(
InputFile->makeTriple(), InputFile->getFileName(), nullptr,
/* IsPIC */ true, DWARFContext::create(*InputFile),
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 9077869..8769340 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -354,6 +354,7 @@ RewriteInstance::RewriteInstance(ELFObjectFileBase *File, const int Argc,
}
}
+ Relocation::Arch = TheTriple.getArch();
auto BCOrErr = BinaryContext::createBinaryContext(
TheTriple, File->getFileName(), Features.get(), IsPIC,
DWARFContext::create(*File, DWARFContext::ProcessDebugRelocations::Ignore,
@@ -955,13 +956,13 @@ void RewriteInstance::discoverFileObjects() {
uint64_t SymbolSize = ELFSymbolRef(Symbol).getSize();
uint64_t SymbolAlignment = Symbol.getAlignment();
- auto registerName = [&](uint64_t FinalSize) {
+ auto registerName = [&](uint64_t FinalSize, BinarySection *Section = NULL) {
// Register names even if it's not a function, e.g. for an entry point.
BC->registerNameAtAddress(UniqueName, SymbolAddress, FinalSize,
- SymbolAlignment, SymbolFlags);
+ SymbolAlignment, SymbolFlags, Section);
if (!AlternativeName.empty())
BC->registerNameAtAddress(AlternativeName, SymbolAddress, FinalSize,
- SymbolAlignment, SymbolFlags);
+ SymbolAlignment, SymbolFlags, Section);
};
section_iterator Section =
@@ -986,12 +987,25 @@ void RewriteInstance::discoverFileObjects() {
<< " for function\n");
if (SymbolAddress == Section->getAddress() + Section->getSize()) {
+ ErrorOr<BinarySection &> SectionOrError =
+ BC->getSectionForAddress(Section->getAddress());
+
+ // Skip symbols from invalid sections
+ if (!SectionOrError) {
+ BC->errs() << "BOLT-WARNING: " << UniqueName << " (0x"
+ << Twine::utohexstr(SymbolAddress)
+ << ") does not have any section\n";
+ continue;
+ }
+
assert(SymbolSize == 0 &&
"unexpect non-zero sized symbol at end of section");
- LLVM_DEBUG(
- dbgs()
- << "BOLT-DEBUG: rejecting as symbol points to end of its section\n");
- registerName(SymbolSize);
+ LLVM_DEBUG({
+ dbgs() << "BOLT-DEBUG: rejecting as symbol " << UniqueName
+ << " points to end of " << SectionOrError->getName()
+ << " section\n";
+ });
+ registerName(SymbolSize, &SectionOrError.get());
continue;
}
@@ -2143,6 +2157,14 @@ bool RewriteInstance::analyzeRelocation(
if (!Relocation::isSupported(RType))
return false;
+ auto IsWeakReference = [](const SymbolRef &Symbol) {
+ Expected<uint32_t> SymFlagsOrErr = Symbol.getFlags();
+ if (!SymFlagsOrErr)
+ return false;
+ return (*SymFlagsOrErr & SymbolRef::SF_Undefined) &&
+ (*SymFlagsOrErr & SymbolRef::SF_Weak);
+ };
+
const bool IsAArch64 = BC->isAArch64();
const size_t RelSize = Relocation::getSizeForType(RType);
@@ -2174,7 +2196,8 @@ bool RewriteInstance::analyzeRelocation(
// Section symbols are marked as ST_Debug.
IsSectionRelocation = (cantFail(Symbol.getType()) == SymbolRef::ST_Debug);
// Check for PLT entry registered with symbol name
- if (!SymbolAddress && (IsAArch64 || BC->isRISCV())) {
+ if (!SymbolAddress && !IsWeakReference(Symbol) &&
+ (IsAArch64 || BC->isRISCV())) {
const BinaryData *BD = BC->getPLTBinaryDataByName(SymbolName);
SymbolAddress = BD ? BD->getAddress() : 0;
}
@@ -2603,7 +2626,7 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection,
Expected<StringRef> SectionName = Section->getName();
if (SectionName && !SectionName->empty())
ReferencedSection = BC->getUniqueSectionByName(*SectionName);
- } else if (ReferencedSymbol && ContainingBF &&
+ } else if (BC->isRISCV() && ReferencedSymbol && ContainingBF &&
(cantFail(Symbol.getFlags()) & SymbolRef::SF_Absolute)) {
// This might be a relocation for an ABS symbols like __global_pointer$ on
// RISC-V
@@ -2614,6 +2637,30 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection,
}
}
+ if (Relocation::isGOT(RType) && !Relocation::isTLS(RType)) {
+ auto exitOnGotEndSymol = [&](StringRef Name) {
+ BC->errs() << "BOLT-ERROR: GOT table contains currently unsupported "
+ "section end symbol "
+ << Name << "\n";
+ exit(1);
+ };
+
+ if (SymbolIter != InputFile->symbol_end() && ReferencedSection) {
+ if (cantFail(SymbolIter->getAddress()) ==
+ ReferencedSection->getEndAddress())
+ exitOnGotEndSymol(cantFail(SymbolIter->getName()));
+ } else {
+ // If no section and symbol are provided by relocation, try to find the
+ // symbol by its name, including the possibility that the symbol is local.
+ BinaryData *BD = BC->getBinaryDataByName(SymbolName);
+ if (!BD && NR.getUniquifiedNameCount(SymbolName) == 1)
+ BD = BC->getBinaryDataByName(NR.getUniqueName(SymbolName, 1));
+
+ if ((BD && BD->getAddress() == BD->getSection().getEndAddress()))
+ exitOnGotEndSymol(BD->getName());
+ }
+ }
+
if (!ReferencedSection)
ReferencedSection = BC->getSectionForAddress(SymbolAddress);
@@ -5509,6 +5556,14 @@ uint64_t RewriteInstance::getNewFunctionOrDataAddress(uint64_t OldAddress) {
if (const BinaryFunction *BF =
BC->getBinaryFunctionContainingAddress(OldAddress)) {
if (BF->isEmitted()) {
+      // If OldAddress is a secondary entry point of the
+      // function, BOLT can compute its new output address.
+ if (BF->isMultiEntry()) {
+ for (const BinaryBasicBlock &BB : *BF)
+ if (BB.isEntryPoint() &&
+ (BF->getAddress() + BB.getOffset()) == OldAddress)
+ return BF->getOutputAddress() + BB.getOffset();
+ }
BC->errs() << "BOLT-ERROR: unable to get new address corresponding to "
"input address 0x"
<< Twine::utohexstr(OldAddress) << " in function " << *BF
diff --git a/bolt/test/AArch64/Inputs/build_id.ldscript b/bolt/test/AArch64/Inputs/build_id.ldscript
new file mode 100644
index 0000000..0af8e96
--- /dev/null
+++ b/bolt/test/AArch64/Inputs/build_id.ldscript
@@ -0,0 +1,9 @@
+SECTIONS
+{
+ PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x400000) + SIZEOF_HEADERS;
+ .note.gnu.build-id (0x400400):
+ {
+ build_id_note = ABSOLUTE(.);
+ *(.note.gnu.build-id)
+ }
+}
diff --git a/bolt/test/AArch64/Inputs/got_end_of_section_symbol.lld_script b/bolt/test/AArch64/Inputs/got_end_of_section_symbol.lld_script
new file mode 100644
index 0000000..2ad4169
--- /dev/null
+++ b/bolt/test/AArch64/Inputs/got_end_of_section_symbol.lld_script
@@ -0,0 +1,6 @@
+SECTIONS {
+ PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x400000) + SIZEOF_HEADERS;
+ .data : { *(.data) *(.array) }
+ .text : { *(.text) }
+ .got : { *(.got) *(.igot) }
+}
diff --git a/bolt/test/AArch64/build_id.c b/bolt/test/AArch64/build_id.c
new file mode 100644
index 0000000..01e433c
--- /dev/null
+++ b/bolt/test/AArch64/build_id.c
@@ -0,0 +1,25 @@
+// This test checks that referencing build_id through the GOT
+// results in a GOT access after disassembly, rather than a
+// direct reference to the build_id address.
+
+// RUN: %clang %cflags -fuse-ld=lld -Wl,-T,%S/Inputs/build_id.ldscript -Wl,-q \
+// RUN: -Wl,--no-relax -Wl,--build-id=sha1 %s -o %t.exe
+// RUN: llvm-bolt -print-disasm --print-only=get_build_id %t.exe -o %t.bolt | \
+// RUN: FileCheck %s
+
+// CHECK: adrp [[REG:x[0-28]+]], __BOLT_got_zero
+// CHECK: ldr x{{.*}}, [[[REG]], :lo12:__BOLT_got_zero{{.*}}]
+
+struct build_id_note {
+ char pad[16];
+ char hash[20];
+};
+
+extern const struct build_id_note build_id_note;
+
+__attribute__((noinline)) char get_build_id() { return build_id_note.hash[0]; }
+
+int main() {
+ get_build_id();
+ return 0;
+}
diff --git a/bolt/test/AArch64/got_end_of_section_symbol.s b/bolt/test/AArch64/got_end_of_section_symbol.s
new file mode 100644
index 0000000..1f3732c
--- /dev/null
+++ b/bolt/test/AArch64/got_end_of_section_symbol.s
@@ -0,0 +1,28 @@
+# RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown \
+# RUN: %s -o %t.o
+# RUN: %clang %cflags -nostartfiles -nodefaultlibs -static -Wl,--no-relax \
+# RUN: -Wl,-q -Wl,-T %S/Inputs/got_end_of_section_symbol.lld_script \
+# RUN: %t.o -o %t.exe
+# RUN: not llvm-bolt %t.exe -o %t.bolt 2>&1 | FileCheck %s
+
+# CHECK: BOLT-ERROR: GOT table contains currently unsupported section end
+# CHECK-SAME: symbol array_end
+
+.section .array, "a", @progbits
+.globl array_start
+.globl array_end
+array_start:
+ .word 0
+array_end:
+
+.section .text
+.globl _start
+.type _start, %function
+_start:
+ adrp x1, #:got:array_start
+ ldr x1, [x1, #:got_lo12:array_start]
+ adrp x0, #:got:array_end
+ ldr x0, [x0, #:got_lo12:array_end]
+ adrp x2, #:got:_start
+ ldr x2, [x2, #:got_lo12:_start]
+ ret
diff --git a/bolt/test/AArch64/update-weak-reference-symbol.s b/bolt/test/AArch64/update-weak-reference-symbol.s
new file mode 100644
index 0000000..600a06b8
--- /dev/null
+++ b/bolt/test/AArch64/update-weak-reference-symbol.s
@@ -0,0 +1,34 @@
+// This test checks whether BOLT can correctly handle relocations against weak symbols.
+
+// RUN: %clang %cflags -Wl,-z,notext -shared -Wl,-q %s -o %t.so
+// RUN: llvm-bolt %t.so -o %t.so.bolt
+// RUN: llvm-nm -n %t.so.bolt > %t.out.txt
+// RUN: llvm-objdump -dj .rodata %t.so.bolt >> %t.out.txt
+// RUN: FileCheck %s --input-file=%t.out.txt
+
+# CHECK: w func_1
+# CHECK: {{0+}}[[#%x,ADDR:]] W func_2
+
+# CHECK: {{.*}} <.rodata>:
+# CHECK-NEXT: {{.*}} .word 0x00000000
+# CHECK-NEXT: {{.*}} .word 0x00000000
+# CHECK-NEXT: {{.*}} .word 0x{{[0]+}}[[#ADDR]]
+# CHECK-NEXT: {{.*}} .word 0x00000000
+
+ .text
+ .weak func_2
+ .weak func_1
+ .global wow
+ .type wow, %function
+wow:
+ bl func_1
+ bl func_2
+ ret
+ .type func_2, %function
+func_2:
+ ret
+ .section .rodata
+.LC0:
+ .xword func_1
+.LC1:
+ .xword func_2
diff --git a/bolt/test/X86/Inputs/build_id.yaml b/bolt/test/X86/Inputs/build_id.yaml
new file mode 100644
index 0000000..af01290
--- /dev/null
+++ b/bolt/test/X86/Inputs/build_id.yaml
@@ -0,0 +1,326 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_X86_64
+ Entry: 0x4010A0
+ProgramHeaders:
+ - Type: PT_PHDR
+ Flags: [ PF_R ]
+ VAddr: 0x400040
+ Align: 0x8
+ Offset: 0x40
+ - Type: PT_INTERP
+ Flags: [ PF_R ]
+ FirstSec: .interp
+ LastSec: .interp
+ VAddr: 0x400444
+ Offset: 0x444
+ - Type: PT_LOAD
+ Flags: [ PF_X, PF_R ]
+ FirstSec: .init
+ LastSec: .fini
+ VAddr: 0x401000
+ Align: 0x1000
+ Offset: 0x1000
+ - Type: PT_LOAD
+ Flags: [ PF_R ]
+ FirstSec: .rodata
+ LastSec: .rodata
+ VAddr: 0x402000
+ Align: 0x1000
+ Offset: 0x2000
+ - Type: PT_LOAD
+ Flags: [ PF_W, PF_R ]
+ FirstSec: .init_array
+ LastSec: .bss
+ VAddr: 0x403DD8
+ Align: 0x1000
+ Offset: 0x2DD8
+ - Type: PT_DYNAMIC
+ Flags: [ PF_W, PF_R ]
+ FirstSec: .dynamic
+ LastSec: .dynamic
+ VAddr: 0x403DE8
+ Align: 0x8
+ Offset: 0x2DE8
+ - Type: PT_NOTE
+ Flags: [ PF_R ]
+ FirstSec: .note.gnu.build-id
+ LastSec: .note.ABI-tag
+ VAddr: 0x400400
+ Align: 0x4
+ Offset: 0x400
+Sections:
+ - Name: .note.gnu.build-id
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400400
+ AddressAlign: 0x4
+ Offset: 0x400
+ Notes:
+ - Name: GNU
+ Desc: 3C34F7D1612996940C48F98DC272543BC3C9C956
+ Type: NT_PRPSINFO
+ - Name: .note.ABI-tag
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400424
+ AddressAlign: 0x4
+ Notes:
+ - Name: GNU
+ Desc: '00000000030000000200000000000000'
+ Type: NT_VERSION
+ - Name: .interp
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400444
+ AddressAlign: 0x1
+ Content: 2F6C696236342F6C642D6C696E75782D7838362D36342E736F2E3200
+ - Name: .gnu.hash
+ Type: SHT_GNU_HASH
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400460
+ Link: .dynsym
+ AddressAlign: 0x8
+ Header:
+ SymNdx: 0x7
+ Shift2: 0x6
+ BloomFilter: [ 0x810000 ]
+ HashBuckets: [ 0x7, 0x0 ]
+ HashValues: [ 0x6DCE65D1 ]
+ - Name: .dynsym
+ Type: SHT_DYNSYM
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400488
+ Link: .dynstr
+ AddressAlign: 0x8
+ - Name: .dynstr
+ Type: SHT_STRTAB
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400548
+ AddressAlign: 0x1
+ - Name: .gnu.version
+ Type: SHT_GNU_versym
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4005F2
+ Link: .dynsym
+ AddressAlign: 0x2
+ Entries: [ 0, 2, 3, 1, 1, 4, 1, 2 ]
+ - Name: .gnu.version_r
+ Type: SHT_GNU_verneed
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400608
+ Link: .dynstr
+ AddressAlign: 0x8
+ Dependencies:
+ - Version: 1
+ File: libc.so.6
+ Entries:
+ - Name: GLIBC_2.3.4
+ Hash: 157882740
+ Flags: 0
+ Other: 4
+ - Name: GLIBC_2.34
+ Hash: 110530996
+ Flags: 0
+ Other: 3
+ - Name: GLIBC_2.2.5
+ Hash: 157882997
+ Flags: 0
+ Other: 2
+ - Name: .init
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401000
+ AddressAlign: 0x4
+ Offset: 0x1000
+ Content: F30F1EFA4883EC08488B05D92F00004885C07402FFD04883C408C3
+ - Name: .plt.sec
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401060
+ AddressAlign: 0x10
+ EntSize: 0x10
+ Content: F30F1EFAF2FF25AD2F00000F1F440000F30F1EFAF2FF25A52F00000F1F440000
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401080
+ AddressAlign: 0x10
+ Content: F30F1EFA4883EC0831C0E80101000031C04883C408C3662E0F1F840000000000F30F1EFA31ED4989D15E4889E24883E4F050544531C031C9488D3DC1FFFFFFFF15132F0000F4662E0F1F840000000000488D3D612F0000488D055A2F00004839F87415488B05F62E00004885C07409FFE00F1F8000000000C30F1F8000000000488D3D312F0000488D352A2F00004829FE4889F048C1EE3F48C1F8034801C648D1FE7414488B05C52E00004885C07408FFE0660F1F440000C30F1F8000000000F30F1EFA803DED2E000000752B5548833DA22E0000004889E5740C488B3DCE2E0000E8E9FEFFFFE864FFFFFFC605C52E0000015DC30F1F00C30F1F8000000000F30F1EFAE977FFFFFF0F1F8000000000F30F1EFA415455488D2D660E000053488D1D6AF2FFFF4C8D6314660F1F4400000FB6134889EEBF0100000031C04883C301E8AAFEFFFF4C39E375E55BBF0A0000005D415CE987FEFFFF
+ - Name: .fini
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x4011DC
+ AddressAlign: 0x4
+ Content: F30F1EFA4883EC084883C408C3
+ - Name: .rodata
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x402000
+ AddressAlign: 0x4
+ Offset: 0x2000
+ Content: '0100020025303268687800'
+ - Name: .init_array
+ Type: SHT_INIT_ARRAY
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403DD8
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Offset: 0x2DD8
+ Content: '8011400000000000'
+ - Name: .fini_array
+ Type: SHT_FINI_ARRAY
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403DE0
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Content: '4011400000000000'
+ - Name: .dynamic
+ Type: SHT_DYNAMIC
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403DE8
+ Link: .dynstr
+ AddressAlign: 0x8
+ Entries:
+ - Tag: DT_NEEDED
+ Value: 0x37
+ - Tag: DT_INIT
+ Value: 0x401000
+ - Tag: DT_FINI
+ Value: 0x4011DC
+ - Tag: DT_INIT_ARRAY
+ Value: 0x403DD8
+ - Tag: DT_INIT_ARRAYSZ
+ Value: 0x8
+ - Tag: DT_FINI_ARRAY
+ Value: 0x403DE0
+ - Tag: DT_FINI_ARRAYSZ
+ Value: 0x8
+ - Tag: DT_GNU_HASH
+ Value: 0x400460
+ - Tag: DT_STRTAB
+ Value: 0x400548
+ - Tag: DT_SYMTAB
+ Value: 0x400488
+ - Tag: DT_STRSZ
+ Value: 0xA9
+ - Tag: DT_SYMENT
+ Value: 0x18
+ - Tag: DT_DEBUG
+ Value: 0x0
+ - Tag: DT_PLTGOT
+ Value: 0x404000
+ - Tag: DT_PLTRELSZ
+ Value: 0x30
+ - Tag: DT_PLTREL
+ Value: 0x7
+ - Tag: DT_FLAGS
+ Value: 0x8
+ - Tag: DT_FLAGS_1
+ Value: 0x8000001
+ - Tag: DT_VERNEED
+ Value: 0x400608
+ - Tag: DT_VERNEEDNUM
+ Value: 0x1
+ - Tag: DT_VERSYM
+ Value: 0x4005F2
+ - Tag: DT_RELACOUNT
+ Value: 0x3
+ - Tag: DT_NULL
+ Value: 0x0
+ - Name: .data
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404028
+ AddressAlign: 0x8
+ Content: '00000000000000003040400000000000'
+ - Name: .tm_clone_table
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404038
+ AddressAlign: 0x8
+ - Name: .bss
+ Type: SHT_NOBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404038
+ AddressAlign: 0x1
+ Size: 0x8
+ - Name: .rela.text
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .text
+ Relocations:
+ - Offset: 0x40108B
+ Symbol: print_build_id
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4010BB
+ Symbol: main
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4011A2
+ Symbol: build_id_note
+ Type: R_X86_64_PC32
+ Addend: 12
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .note.gnu.build-id
+ - Name: .note.ABI-tag
+ - Name: .interp
+ - Name: .gnu.hash
+ - Name: .dynsym
+ - Name: .dynstr
+ - Name: .gnu.version
+ - Name: .gnu.version_r
+ - Name: .init
+ - Name: .plt.sec
+ - Name: .text
+ - Name: .rela.text
+ - Name: .fini
+ - Name: .rodata
+ - Name: .init_array
+ - Name: .fini_array
+ - Name: .dynamic
+ - Name: .data
+ - Name: .tm_clone_table
+ - Name: .bss
+ - Name: .symtab
+ - Name: .strtab
+ - Name: .shstrtab
+Symbols:
+ - Name: print_build_id
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401190
+ Size: 0x49
+ - Name: _end
+ Section: .bss
+ Binding: STB_GLOBAL
+ Value: 0x404040
+ - Name: _start
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4010A0
+ Size: 0x26
+ - Name: __bss_start
+ Section: .bss
+ Binding: STB_GLOBAL
+ Value: 0x404038
+ - Name: main
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401080
+ Size: 0x16
+ - Name: build_id_note
+ Index: SHN_ABS
+ Binding: STB_GLOBAL
+ Value: 0x400400
+...
diff --git a/bolt/test/X86/build_id.test b/bolt/test/X86/build_id.test
new file mode 100644
index 0000000..8d28e12
--- /dev/null
+++ b/bolt/test/X86/build_id.test
@@ -0,0 +1,8 @@
+// This test checks that relocation addend used to address build_id fields
+// is properly disassembled by BOLT.
+
+RUN: yaml2obj %p/Inputs/build_id.yaml &> %t.exe
+RUN: llvm-bolt -print-disasm --print-only=print_build_id %t.exe -o %t.bolt | \
+RUN: FileCheck %s
+
+CHECK: leaq build_id_note+16(%rip), %rbx
diff --git a/bolt/test/X86/dynamic-relocs-on-entry.s b/bolt/test/X86/dynamic-relocs-on-entry.s
new file mode 100644
index 0000000..2a29a43
--- /dev/null
+++ b/bolt/test/X86/dynamic-relocs-on-entry.s
@@ -0,0 +1,32 @@
+// This test examines whether BOLT correctly handles the case
+// where a dynamic relocation points to a secondary entry
+// point of a function.
+
+# RUN: %clang %cflags -fPIC -pie %s -o %t.exe -nostdlib -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt > %t.out.txt
+# RUN: readelf -r %t.bolt >> %t.out.txt
+# RUN: llvm-objdump --disassemble-symbols=chain %t.bolt >> %t.out.txt
+# RUN: FileCheck %s --input-file=%t.out.txt
+
+## Check if the new address in `chain` is correctly updated by BOLT
+# CHECK: Relocation section '.rela.dyn' at offset 0x{{.*}} contains 1 entry:
+# CHECK: {{.*}} R_X86_64_RELATIVE [[#%x,ADDR:]]
+# CHECK: [[#ADDR]]: c3 retq
+ .text
+ .type chain, @function
+chain:
+ movq $1, %rax
+Label:
+ ret
+ .size chain, .-chain
+
+ .type _start, @function
+ .global _start
+_start:
+ jmpq *.Lfoo(%rip)
+ ret
+ .size _start, .-_start
+
+ .data
+.Lfoo:
+ .quad Label \ No newline at end of file
diff --git a/bolt/test/X86/section-end-sym.s b/bolt/test/X86/section-end-sym.s
index 545cf37..29ff6e0 100644
--- a/bolt/test/X86/section-end-sym.s
+++ b/bolt/test/X86/section-end-sym.s
@@ -1,7 +1,7 @@
## Check that BOLT doesn't consider end-of-section symbols (e.g., _etext) as
## functions.
-# REQUIRES: x86_64-linux, asserts
+# REQUIRES: system-linux, asserts
# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-linux %s -o %t.o
# RUN: ld.lld %t.o -o %t.exe -q
@@ -9,7 +9,7 @@
# RUN: | FileCheck %s
# CHECK: considering symbol etext for function
-# CHECK-NEXT: rejecting as symbol points to end of its section
+# CHECK-NEXT: rejecting as symbol etext points to end of .text section
# CHECK-NOT: Binary Function "etext{{.*}}" after building cfg
diff --git a/bolt/unittests/Core/BinaryContext.cpp b/bolt/unittests/Core/BinaryContext.cpp
index cfec72a..6c32881 100644
--- a/bolt/unittests/Core/BinaryContext.cpp
+++ b/bolt/unittests/Core/BinaryContext.cpp
@@ -46,6 +46,7 @@ protected:
}
void initializeBOLT() {
+ Relocation::Arch = ObjFile->makeTriple().getArch();
BC = cantFail(BinaryContext::createBinaryContext(
ObjFile->makeTriple(), ObjFile->getFileName(), nullptr, true,
DWARFContext::create(*ObjFile.get()), {llvm::outs(), llvm::errs()}));
diff --git a/bolt/unittests/Core/MCPlusBuilder.cpp b/bolt/unittests/Core/MCPlusBuilder.cpp
index 62f3aaab..c66c2d0c 100644
--- a/bolt/unittests/Core/MCPlusBuilder.cpp
+++ b/bolt/unittests/Core/MCPlusBuilder.cpp
@@ -56,6 +56,7 @@ protected:
}
void initializeBolt() {
+ Relocation::Arch = ObjFile->makeTriple().getArch();
BC = cantFail(BinaryContext::createBinaryContext(
ObjFile->makeTriple(), ObjFile->getFileName(), nullptr, true,
DWARFContext::create(*ObjFile.get()), {llvm::outs(), llvm::errs()}));
diff --git a/clang-tools-extra/clang-tidy/tool/CMakeLists.txt b/clang-tools-extra/clang-tidy/tool/CMakeLists.txt
index b220cbe..9f327ce 100644
--- a/clang-tools-extra/clang-tidy/tool/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/tool/CMakeLists.txt
@@ -33,6 +33,7 @@ clang_target_link_libraries(clangTidyMain
# Support plugins.
if(CLANG_PLUGIN_SUPPORT)
set(support_plugins SUPPORT_PLUGINS)
+ set(export_symbols EXPORT_SYMBOLS_FOR_PLUGINS)
endif()
add_clang_tool(clang-tidy
@@ -41,6 +42,7 @@ add_clang_tool(clang-tidy
DEPENDS
clang-resource-headers
${support_plugins}
+ ${export_symbols}
)
clang_target_link_libraries(clang-tidy
PRIVATE
@@ -57,10 +59,6 @@ target_link_libraries(clang-tidy
${ALL_CLANG_TIDY_CHECKS}
)
-if(CLANG_PLUGIN_SUPPORT)
- export_executable_symbols_for_plugins(clang-tidy)
-endif()
-
install(PROGRAMS clang-tidy-diff.py
DESTINATION "${CMAKE_INSTALL_DATADIR}/clang"
COMPONENT clang-tidy)
diff --git a/clang/cmake/modules/AddClang.cmake b/clang/cmake/modules/AddClang.cmake
index 5327b5d..9f26472 100644
--- a/clang/cmake/modules/AddClang.cmake
+++ b/clang/cmake/modules/AddClang.cmake
@@ -160,7 +160,7 @@ macro(add_clang_tool name)
AND (NOT LLVM_DISTRIBUTION_COMPONENTS OR ${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS)
)
set(get_obj_args ${ARGN})
- list(FILTER get_obj_args EXCLUDE REGEX "^SUPPORT_PLUGINS$")
+ list(FILTER get_obj_args EXCLUDE REGEX "^(SUPPORT_PLUGINS|EXPORT_SYMBOLS_FOR_PLUGINS)$")
generate_llvm_objects(${name} ${get_obj_args})
add_custom_target(${name} DEPENDS llvm-driver clang-resource-headers)
else()
diff --git a/clang/docs/CommandGuide/clang.rst b/clang/docs/CommandGuide/clang.rst
index a0c2594..ca8176f 100644
--- a/clang/docs/CommandGuide/clang.rst
+++ b/clang/docs/CommandGuide/clang.rst
@@ -703,6 +703,10 @@ Preprocessor Options
Do not search clang's builtin directory for include files.
+.. option:: -nostdinc++
+
+ Do not search the system C++ standard library directory for include files.
+
.. option:: -fkeep-system-includes
Usable only with :option:`-E`. Do not copy the preprocessed content of
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 0e72b3c..3fc74cd 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -363,5 +363,7 @@ considered for standardization. Please post on the
| device extension | `'ompx_bare' clause on 'target teams' construct | :good:`prototyped` | #66844, #70612 |
| | <https://www.osti.gov/servlets/purl/2205717>`_ | | |
+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+| device extension | Multi-dim 'num_teams' clause on 'target teams ompx_bare' construct | :good:`partial` | #99732, #101407 |
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
.. _Discourse forums (Runtimes - OpenMP category): https://discourse.llvm.org/c/runtimes/openmp/35
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 0f1a4c1..a4a862c 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -37,6 +37,8 @@ These changes are ones which we think may surprise users when upgrading to
Clang |release| because of the opportunity they pose for disruption to existing
code bases.
+- The ``le32`` and ``le64`` targets have been removed.
+
C/C++ Language Potentially Breaking Changes
-------------------------------------------
@@ -73,6 +75,9 @@ sections with improvements to Clang's support for those languages.
C++ Language Changes
--------------------
+- Allow single element access of GCC vector/ext_vector_type object to be
+ constant expression. Supports the `V.xyzw` syntax and other tidbits
+ as seen in OpenCL. Selecting multiple elements is left as a future work.
C++17 Feature Support
^^^^^^^^^^^^^^^^^^^^^
@@ -105,6 +110,9 @@ C2y Feature Support
C23 Feature Support
^^^^^^^^^^^^^^^^^^^
+Non-comprehensive list of changes in this release
+-------------------------------------------------
+
New Compiler Flags
------------------
@@ -164,6 +172,7 @@ Bug Fixes in This Version
- Fixed the definition of ``ATOMIC_FLAG_INIT`` in ``<stdatomic.h>`` so it can
be used in C++.
- Fixed a failed assertion when checking required literal types in C context. (#GH101304).
+- Fixed a crash when trying to transform a dependent address space type. Fixes #GH101685.
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -186,6 +195,9 @@ Bug Fixes to C++ Support
substitutions in concepts, so it doesn't incorrectly complain of missing
module imports in those situations. (#GH60336)
- Fix init-capture packs having a size of one before being instantiated. (#GH63677)
+- Clang now preserves the unexpanded flag in a lambda transform used for pack expansion. (#GH56852), (#GH85667),
+ (#GH99877).
+- Fixed a bug when diagnosing ambiguous explicit specializations of constrained member functions.
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -329,6 +341,9 @@ Improvements
^^^^^^^^^^^^
- Improve the handling of mapping array-section for struct containing nested structs with user defined mappers
+- ``num_teams`` now accepts multiple expressions when it is used in the ``target teams ompx_bare`` construct.
+ This allows the target region to be launched with multi-dim grid on GPUs.
+
Additional Information
======================
diff --git a/clang/docs/StandardCPlusPlusModules.rst b/clang/docs/StandardCPlusPlusModules.rst
index b874919..2478a77 100644
--- a/clang/docs/StandardCPlusPlusModules.rst
+++ b/clang/docs/StandardCPlusPlusModules.rst
@@ -398,6 +398,16 @@ BMIs cannot be shipped in an archive to create a module library. Instead, the
BMIs(``*.pcm``) are compiled into object files(``*.o``) and those object files
are added to the archive instead.
+clang-cl
+~~~~~~~~
+
+``clang-cl`` supports the same options as ``clang++`` for modules as detailed above;
+there is no need to prefix these options with ``/clang:``. Note that ``cl.exe``
+`options to emit/consume IFC files <https://devblogs.microsoft.com/cppblog/using-cpp-modules-in-msvc-from-the-command-line-part-1/>`_ are *not* supported.
+The resultant precompiled modules are also not compatible for use with ``cl.exe``.
+
+We recommend that build system authors use the above-mentioned ``clang++`` options with ``clang-cl`` to build modules.
+
Consistency Requirements
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1387,13 +1397,6 @@ have ``.cppm`` (or ``.ccm``, ``.cxxm``, ``.c++m``) as the file extension.
However, the behavior is inconsistent with other compilers. This is tracked by
`#57416 <https://github.com/llvm/llvm-project/issues/57416>`_.
-clang-cl is not compatible with standard C++ modules
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``/clang:-fmodule-file`` and ``/clang:-fprebuilt-module-path`` cannot be used
-to specify the BMI with ``clang-cl.exe``. This is tracked by
-`#64118 <https://github.com/llvm/llvm-project/issues/64118>`_.
-
Incorrect ODR violation diagnostics
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst
index e9b9573..64e9914 100644
--- a/clang/docs/UsersManual.rst
+++ b/clang/docs/UsersManual.rst
@@ -4745,6 +4745,12 @@ Execute ``clang-cl /?`` to see a list of supported options:
-flto=<value> Set LTO mode to either 'full' or 'thin'
-flto Enable LTO in 'full' mode
-fmerge-all-constants Allow merging of constants
+ -fmodule-file=<module_name>=<module-file>
+ Use the specified module file that provides the module <module_name>
+ -fmodule-header=<header>
+ Build <header> as a C++20 header unit
+ -fmodule-output=<path>
+ Save intermediate module file results when compiling a standard C++ module unit.
-fms-compatibility-version=<value>
Dot-separated value representing the Microsoft compiler version
number to report in _MSC_VER (0 = don't define it; default is same value as installed cl.exe, or 1933)
diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index a8ee8f1..6287113 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -362,7 +362,6 @@ clang/lib/Basic/Targets/BPF.cpp
clang/lib/Basic/Targets/BPF.h
clang/lib/Basic/Targets/Hexagon.h
clang/lib/Basic/Targets/Lanai.h
-clang/lib/Basic/Targets/Le64.h
clang/lib/Basic/Targets/M68k.h
clang/lib/Basic/Targets/MSP430.h
clang/lib/Basic/Targets/NVPTX.cpp
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index f86f181..847a6ea 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -3229,7 +3229,7 @@ class UnresolvedLookupExpr final
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End,
- bool KnownDependent);
+ bool KnownDependent, bool KnownInstantiationDependent);
UnresolvedLookupExpr(EmptyShell Empty, unsigned NumResults,
bool HasTemplateKWAndArgsInfo);
@@ -3248,7 +3248,7 @@ public:
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
UnresolvedSetIterator Begin, UnresolvedSetIterator End,
- bool KnownDependent);
+ bool KnownDependent, bool KnownInstantiationDependent);
// After canonicalization, there may be dependent template arguments in
// CanonicalConverted But none of Args is dependent. When any of
@@ -3258,7 +3258,8 @@ public:
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End, bool KnownDependent);
+ UnresolvedSetIterator End, bool KnownDependent,
+ bool KnownInstantiationDependent);
static UnresolvedLookupExpr *CreateEmpty(const ASTContext &Context,
unsigned NumResults,
diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h
index 3a8337a..1e830b1 100644
--- a/clang/include/clang/AST/OpenMPClause.h
+++ b/clang/include/clang/AST/OpenMPClause.h
@@ -6369,43 +6369,54 @@ public:
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
-class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
- friend class OMPClauseReader;
+///
+/// When 'ompx_bare' clause exists on a 'target' directive, 'num_teams' clause
+/// can accept up to three expressions.
+///
+/// \code
+/// #pragma omp target teams ompx_bare num_teams(x, y, z)
+/// \endcode
+class OMPNumTeamsClause final
+ : public OMPVarListClause<OMPNumTeamsClause>,
+ public OMPClauseWithPreInit,
+ private llvm::TrailingObjects<OMPNumTeamsClause, Expr *> {
+ friend OMPVarListClause;
+ friend TrailingObjects;
/// Location of '('.
SourceLocation LParenLoc;
- /// NumTeams number.
- Stmt *NumTeams = nullptr;
+ OMPNumTeamsClause(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
+ : OMPVarListClause(llvm::omp::OMPC_num_teams, StartLoc, LParenLoc, EndLoc,
+ N),
+ OMPClauseWithPreInit(this) {}
- /// Set the NumTeams number.
- ///
- /// \param E NumTeams number.
- void setNumTeams(Expr *E) { NumTeams = E; }
+ /// Build an empty clause.
+ OMPNumTeamsClause(unsigned N)
+ : OMPVarListClause(llvm::omp::OMPC_num_teams, SourceLocation(),
+ SourceLocation(), SourceLocation(), N),
+ OMPClauseWithPreInit(this) {}
public:
- /// Build 'num_teams' clause.
+ /// Creates clause with a list of variables \a VL.
///
- /// \param E Expression associated with this clause.
- /// \param HelperE Helper Expression associated with this clause.
- /// \param CaptureRegion Innermost OpenMP region where expressions in this
- /// clause must be captured.
+ /// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
- setPreInitStmt(HelperE, CaptureRegion);
- }
+ /// \param VL List of references to the variables.
+ /// \param PreInit
+ static OMPNumTeamsClause *
+ Create(const ASTContext &C, OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, Stmt *PreInit);
- /// Build an empty clause.
- OMPNumTeamsClause()
- : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
- SourceLocation()),
- OMPClauseWithPreInit(this) {}
+ /// Creates an empty clause with \a N variables.
+ ///
+ /// \param C AST context.
+ /// \param N The number of variables.
+ static OMPNumTeamsClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -6413,16 +6424,22 @@ public:
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
- /// Return NumTeams number.
- Expr *getNumTeams() { return cast<Expr>(NumTeams); }
+ /// Return NumTeams expressions.
+ ArrayRef<Expr *> getNumTeams() { return getVarRefs(); }
- /// Return NumTeams number.
- Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
+ /// Return NumTeams expressions.
+ ArrayRef<Expr *> getNumTeams() const {
+ return const_cast<OMPNumTeamsClause *>(this)->getNumTeams();
+ }
- child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
const_child_range children() const {
- return const_child_range(&NumTeams, &NumTeams + 1);
+ auto Children = const_cast<OMPNumTeamsClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index f469cd9..b505c74 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -3828,8 +3828,8 @@ bool RecursiveASTVisitor<Derived>::VisitOMPMapClause(OMPMapClause *C) {
template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPNumTeamsClause(
OMPNumTeamsClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
TRY_TO(VisitOMPClauseWithPreInit(C));
- TRY_TO(TraverseStmt(C->getNumTeams()));
return true;
}
diff --git a/clang/include/clang/Basic/AttributeCommonInfo.h b/clang/include/clang/Basic/AttributeCommonInfo.h
index cdf9dca..5f024b4 100644
--- a/clang/include/clang/Basic/AttributeCommonInfo.h
+++ b/clang/include/clang/Basic/AttributeCommonInfo.h
@@ -191,12 +191,6 @@ public:
/// __gnu__::__attr__ will be normalized to gnu::attr).
std::string getNormalizedFullName() const;
- /// Generate a normalized full name, with syntax, scope and name.
- static std::string
- normalizeFullNameWithSyntax(const IdentifierInfo *Name,
- const IdentifierInfo *Scope,
- AttributeCommonInfo::Syntax SyntaxUsed);
-
bool isDeclspecAttribute() const { return SyntaxUsed == AS_Declspec; }
bool isMicrosoftAttribute() const { return SyntaxUsed == AS_Microsoft; }
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index 88ae0ce..261e91b 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -515,6 +515,16 @@ TARGET_BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "", "power9-vector")
TARGET_BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "", "power9-vector")
TARGET_BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "", "power9-vector")
+// P7 BCD builtins.
+TARGET_BUILTIN(__builtin_cdtbcd, "UiUi", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_cbcdtd, "UiUi", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_addg6s, "UiUiUi", "", "isa-v206-instructions")
+
+// P7 XL Compat BCD builtins.
+TARGET_BUILTIN(__builtin_ppc_cdtbcd, "LLiLLi", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_ppc_cbcdtd, "LLiLLi", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_ppc_addg6s, "LLiLLiLLi", "", "isa-v206-instructions")
+
// P8 BCD builtins.
TARGET_BUILTIN(__builtin_ppc_bcdadd, "V16UcV16UcV16UcIi", "",
"isa-v207-instructions")
diff --git a/clang/include/clang/Basic/BuiltinsX86.def b/clang/include/clang/Basic/BuiltinsX86.def
index 55551f6..a696cf1 100644
--- a/clang/include/clang/Basic/BuiltinsX86.def
+++ b/clang/include/clang/Basic/BuiltinsX86.def
@@ -2179,6 +2179,44 @@ TARGET_BUILTIN(__builtin_ia32_vminmaxps512_round_mask, "V16fV16fV16fIiV16fUsIi",
TARGET_BUILTIN(__builtin_ia32_vminmaxsd_round_mask, "V2dV2dV2dIiV2dUcIi", "nV:128:", "avx10.2-256")
TARGET_BUILTIN(__builtin_ia32_vminmaxsh_round_mask, "V8xV8xV8xIiV8xUcIi", "nV:128:", "avx10.2-256")
TARGET_BUILTIN(__builtin_ia32_vminmaxss_round_mask, "V4fV4fV4fIiV4fUcIi", "nV:128:", "avx10.2-256")
+
+// AVX10.2 SATCVT
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162ibs128, "V8UsV8y", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162ibs256, "V16UsV16y", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162ibs512, "V32UsV32y", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162iubs128, "V8UsV8y", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162iubs256, "V16UsV16y", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtnebf162iubs512, "V32UsV32y", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2ibs128_mask, "V8UsV8xV8UsUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2ibs256_mask, "V16UsV16xV16UsUsIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2ibs512_mask, "V32UsV32xV32UsUiIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2iubs128_mask, "V8UsV8xV8UsUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2iubs256_mask, "V16UsV16xV16UsUsIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2iubs512_mask, "V32UsV32xV32UsUiIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2ibs128_mask, "V4UiV4fV4UiUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2ibs256_mask, "V8UiV8fV8UiUcIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2ibs512_mask, "V16UiV16fV16UiUsIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2iubs128_mask, "V4UiV4fV4UiUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2iubs256_mask, "V8UiV8fV8UiUcIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2iubs512_mask, "V16UiV16fV16UiUsIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162ibs128, "V8UsV8y", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162ibs256, "V16UsV16y", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162ibs512, "V32UsV32y", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162iubs128, "V8UsV8y", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162iubs256, "V16UsV16y", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttnebf162iubs512, "V32UsV32y", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2ibs128_mask, "V8UsV8xV8UsUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2ibs256_mask, "V16UsV16xV16UsUsIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2ibs512_mask, "V32UsV32xV32UsUiIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2iubs128_mask, "V8UsV8xV8UsUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2iubs256_mask, "V16UsV16xV16UsUsIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2iubs512_mask, "V32UsV32xV32UsUiIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2ibs128_mask, "V4UiV4fV4UiUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2ibs256_mask, "V8UiV8fV8UiUcIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2ibs512_mask, "V16UiV16fV16UiUsIi", "nV:512:", "avx10.2-512")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2iubs128_mask, "V4UiV4fV4UiUc", "nV:128:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2iubs256_mask, "V8UiV8fV8UiUcIi", "nV:256:", "avx10.2-256")
+TARGET_BUILTIN(__builtin_ia32_vcvttps2iubs512_mask, "V16UiV16fV16UiUsIi", "nV:512:", "avx10.2-512")
#undef BUILTIN
#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 581434d..02dc0ff 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -11639,6 +11639,8 @@ def warn_omp_unterminated_declare_target : Warning<
InGroup<SourceUsesOpenMP>;
def err_ompx_bare_no_grid : Error<
"'ompx_bare' clauses requires explicit grid size via 'num_teams' and 'thread_limit' clauses">;
+def err_omp_multi_expr_not_allowed: Error<"only one expression allowed in '%0' clause">;
+def err_ompx_more_than_three_expr_not_allowed: Error<"at most three expressions are allowed in '%0' clause in 'target teams ompx_bare' construct">;
} // end of OpenMP category
let CategoryName = "Related Result Type Issue" in {
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index dc71ef8..10538f5 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -110,9 +110,10 @@ FEATURE(ptrauth_vtable_pointer_address_discrimination, LangOpts.PointerAuthVTPtr
FEATURE(ptrauth_vtable_pointer_type_discrimination, LangOpts.PointerAuthVTPtrTypeDiscrimination)
FEATURE(ptrauth_type_info_vtable_pointer_discrimination, LangOpts.PointerAuthTypeInfoVTPtrDiscrimination)
FEATURE(ptrauth_member_function_pointer_type_discrimination, LangOpts.PointerAuthCalls)
-FEATURE(ptrauth_init_fini, LangOpts.PointerAuthInitFini)
FEATURE(ptrauth_function_pointer_type_discrimination, LangOpts.PointerAuthFunctionTypeDiscrimination)
FEATURE(ptrauth_indirect_gotos, LangOpts.PointerAuthIndirectGotos)
+FEATURE(ptrauth_init_fini, LangOpts.PointerAuthInitFini)
+FEATURE(ptrauth_init_fini_address_discrimination, LangOpts.PointerAuthInitFiniAddressDiscrimination)
EXTENSION(swiftcc,
PP.getTargetInfo().checkCallingConvention(CC_Swift) ==
clang::TargetInfo::CCCR_OK)
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 0035092c..6945f8b 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -170,9 +170,12 @@ LANGOPT(PointerAuthAuthTraps, 1, 0, "pointer authentication failure traps")
LANGOPT(PointerAuthVTPtrAddressDiscrimination, 1, 0, "incorporate address discrimination in authenticated vtable pointers")
LANGOPT(PointerAuthVTPtrTypeDiscrimination, 1, 0, "incorporate type discrimination in authenticated vtable pointers")
LANGOPT(PointerAuthTypeInfoVTPtrDiscrimination, 1, 0, "incorporate type and address discrimination in authenticated vtable pointers for std::type_info")
-LANGOPT(PointerAuthInitFini, 1, 0, "sign function pointers in init/fini arrays")
BENIGN_LANGOPT(PointerAuthFunctionTypeDiscrimination, 1, 0,
"Use type discrimination when signing function pointers")
+LANGOPT(PointerAuthInitFini, 1, 0, "sign function pointers in init/fini arrays")
+LANGOPT(PointerAuthInitFiniAddressDiscrimination, 1, 0,
+ "incorporate address discrimination in authenticated function pointers in init/fini arrays")
+LANGOPT(PointerAuthELFGOT, 1, 0, "authenticate pointers from GOT")
LANGOPT(DoubleSquareBracketAttributes, 1, 0, "'[[]]' attributes extension for all language standard modes")
LANGOPT(ExperimentalLateParseAttributes, 1, 0, "experimental late parsing of attributes")
diff --git a/clang/include/clang/Basic/PointerAuthOptions.h b/clang/include/clang/Basic/PointerAuthOptions.h
index 417b4b0..8f63cf2 100644
--- a/clang/include/clang/Basic/PointerAuthOptions.h
+++ b/clang/include/clang/Basic/PointerAuthOptions.h
@@ -23,6 +23,10 @@
namespace clang {
+/// Constant discriminator to be used with function pointers in .init_array and
+/// .fini_array. The value is ptrauth_string_discriminator("init_fini")
+constexpr uint16_t InitFiniPointerConstantDiscriminator = 0xD9D4;
+
constexpr unsigned PointerAuthKeyNone = -1;
/// Constant discriminator for std::type_info vtable pointers: 0xB1EA/45546
@@ -186,6 +190,9 @@ struct PointerAuthOptions {
/// The ABI for C++ member function pointers.
PointerAuthSchema CXXMemberFunctionPointers;
+
+ /// The ABI for function addresses in .init_array and .fini_array
+ PointerAuthSchema InitFiniPointers;
};
} // end namespace clang
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 94c093d..0783738 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -2092,7 +2092,7 @@ let SVETargetGuard = "sve2p1", SMETargetGuard = "sme2" in {
def SVCNTP_COUNT : SInst<"svcntp_{d}", "n}i", "QcQsQiQl", MergeNone, "aarch64_sve_cntp_{d}", [IsOverloadNone, VerifyRuntimeMode], [ImmCheck<1, ImmCheck2_4_Mul2>]>;
}
-let SVETargetGuard = "sve2,b16b16", SMETargetGuard = "sme2,b16b16" in {
+let SVETargetGuard = "sve2,sve-b16b16", SMETargetGuard = "sme2,sve-b16b16" in {
defm SVMUL_BF : SInstZPZZ<"svmul", "b", "aarch64_sve_fmul", "aarch64_sve_fmul_u", [VerifyRuntimeMode]>;
defm SVADD_BF : SInstZPZZ<"svadd", "b", "aarch64_sve_fadd", "aarch64_sve_fadd_u", [VerifyRuntimeMode]>;
defm SVSUB_BF : SInstZPZZ<"svsub", "b", "aarch64_sve_fsub", "aarch64_sve_fsub_u", [VerifyRuntimeMode]>;
@@ -2116,7 +2116,7 @@ def SVFCLAMP_BF : SInst<"svclamp[_{d}]", "dddd", "b", MergeNone, "aarch64_sve_
multiclass MinMaxIntr<string i, string zm, string mul, string t> {
def SVS # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "csil", MergeNone, "aarch64_sve_s" # i # zm # "_" # mul, [IsStreaming], []>;
def SVU # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "UcUsUiUl", MergeNone, "aarch64_sve_u" # i # zm # "_" # mul, [IsStreaming], []>;
- def SVF # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "bhfd", MergeNone, "aarch64_sve_f" # i # zm # "_" # mul, [IsStreaming], []>;
+ def SVF # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "hfd", MergeNone, "aarch64_sve_f" # i # zm # "_" # mul, [IsStreaming], []>;
}
let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2" in {
@@ -2134,11 +2134,11 @@ let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2" in {
}
multiclass SInstMinMaxByVector<string name> {
- def NAME # _SINGLE_X2 : SInst<"sv" # name # "nm[_single_{d}_x2]", "22d", "bhfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x2", [IsStreaming], []>;
- def NAME # _SINGLE_X4 : SInst<"sv" # name # "nm[_single_{d}_x4]", "44d", "bhfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x4", [IsStreaming], []>;
+ def NAME # _SINGLE_X2 : SInst<"sv" # name # "nm[_single_{d}_x2]", "22d", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x2", [IsStreaming], []>;
+ def NAME # _SINGLE_X4 : SInst<"sv" # name # "nm[_single_{d}_x4]", "44d", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x4", [IsStreaming], []>;
- def NAME # _X2 : SInst<"sv" # name # "nm[_{d}_x2]", "222", "bhfd", MergeNone, "aarch64_sve_f" # name # "nm_x2", [IsStreaming], []>;
- def NAME # _X4 : SInst<"sv" # name # "nm[_{d}_x4]", "444", "bhfd", MergeNone, "aarch64_sve_f" # name # "nm_x4", [IsStreaming], []>;
+ def NAME # _X2 : SInst<"sv" # name # "nm[_{d}_x2]", "222", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_x2", [IsStreaming], []>;
+ def NAME # _X4 : SInst<"sv" # name # "nm[_{d}_x4]", "444", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_x4", [IsStreaming], []>;
}
let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2" in {
@@ -2172,9 +2172,25 @@ let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2" in {
def SVFCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "hfd", MergeNone, "aarch64_sve_fclamp_single_x4", [IsStreaming], []>;
}
-let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2,b16b16"in {
- def SVBFCLAMP_X2 : SInst<"svclamp[_single_{d}_x2]", "22dd", "b", MergeNone, "aarch64_sve_bfclamp_single_x2", [IsStreaming], []>;
- def SVBFCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "b", MergeNone, "aarch64_sve_bfclamp_single_x4", [IsStreaming], []>;
+multiclass BfSingleMultiVector<string name> {
+ def NAME # _SINGLE_X2 : SInst<"sv" # name # "[_single_{d}_x2]", "22d", "b", MergeNone, "aarch64_sve_f" # name # "_single_x2", [IsStreaming], []>;
+ def NAME # _SINGLE_X4 : SInst<"sv" # name # "[_single_{d}_x4]", "44d", "b", MergeNone, "aarch64_sve_f" # name # "_single_x4", [IsStreaming], []>;
+
+ def NAME # _X2 : SInst<"sv" # name # "[_{d}_x2]", "222", "b", MergeNone, "aarch64_sve_f" # name # "_x2", [IsStreaming], []>;
+ def NAME # _X4 : SInst<"sv" # name # "[_{d}_x4]", "444", "b", MergeNone, "aarch64_sve_f" # name # "_x4", [IsStreaming], []>;
+}
+
+let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2,sve-b16b16"in {
+ def SVBFCLAMP_X2 : SInst<"svclamp[_single_{d}_x2]", "22dd", "b", MergeNone, "aarch64_sve_bfclamp_single_x2", [IsStreaming], []>;
+ def SVBFCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "b", MergeNone, "aarch64_sve_bfclamp_single_x4", [IsStreaming], []>;
+
+ // bfmin, bfmax (single, multi)
+ defm SVBFMIN : BfSingleMultiVector<"min">;
+ defm SVBFMAX : BfSingleMultiVector<"max">;
+
+ // bfminnm, bfmaxnm (single, multi)
+ defm SVBFMINNM : BfSingleMultiVector<"minnm">;
+ defm SVBFMAXNM : BfSingleMultiVector<"maxnm">;
}
let SVETargetGuard = InvalidMode, SMETargetGuard = "sme2" in {
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 0cab4b8..dda2dcb 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1378,6 +1378,9 @@ let HasMasked = false,
let RequiredFeatures = ["Zvfhmin"] in
defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x",
[["v", "v", "vv"]]>;
+ let RequiredFeatures = ["Zvfbfmin"] in
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "y",
+ [["v", "v", "vv"]]>;
let SupportOverloading = false in
defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
[["x", "v", "ve"],
@@ -1890,6 +1893,9 @@ let HasMasked = false,
let RequiredFeatures = ["Zvfhmin"] in
defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x",
[["vvm", "v", "vvvm"]]>;
+ let RequiredFeatures = ["Zvfbfmin"] in
+ defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "y",
+ [["vvm", "v", "vvvm"]]>;
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
[["vfm", "v", "vvem"]]>;
}
@@ -1912,8 +1918,18 @@ def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
- def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
- def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
+ def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "si", "vfwcvt_f">;
+ def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "si", "vfwcvt_f">;
+ let RequiredFeatures = ["Zvfh"] in {
+ let Name = "vfwcvt_f_xu_v",
+ IRName = "vfwcvt_f_xu_v",
+ MaskedIRName = "vfwcvt_f_xu_v_mask" in
+ def : RVVConvBuiltin<"Fw", "FwUv", "c", "vfwcvt_f">;
+ let Name = "vfwcvt_f_x_v",
+ IRName = "vfwcvt_f_x_v",
+ MaskedIRName = "vfwcvt_f_x_v_mask" in
+ def : RVVConvBuiltin<"Fw", "Fwv", "c", "vfwcvt_f">;
+ }
def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "f", "vfwcvt_f">;
let RequiredFeatures = ["Zvfhmin"] in
def vfwcvt_f_f_v_fp16 : RVVConvBuiltin<"w", "wv", "x", "vfwcvt_f"> {
@@ -1927,6 +1943,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
+ let RequiredFeatures = ["Zvfh"] in {
+ let Name = "vfncvt_rtz_xu_f_w",
+ IRName = "vfncvt_rtz_xu_f_w",
+ MaskedIRName = "vfncvt_rtz_xu_f_w_mask" in
+ def : RVVConvBuiltin<"Uv", "UvFw", "c", "vfncvt_rtz_xu">;
+ let Name = "vfncvt_rtz_x_f_w",
+ IRName = "vfncvt_rtz_x_f_w",
+ MaskedIRName = "vfncvt_rtz_x_f_w_mask" in
+ def : RVVConvBuiltin<"Iv", "IvFw", "c", "vfncvt_rtz_x">;
+ }
def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
}
@@ -2005,10 +2031,18 @@ let ManualCodegen = [{
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
let OverloadedName = "vfncvt_x" in
defm :
- RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>;
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
let OverloadedName = "vfncvt_xu" in
defm :
- RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>;
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
+ let RequiredFeatures = ["Zvfh"] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+ }
let OverloadedName = "vfncvt_f" in {
defm :
RVVConvBuiltinSet<"vfncvt_f_x_w", "xf", [["v", "vIwu"]]>;
@@ -2055,10 +2089,18 @@ let ManualCodegen = [{
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
let OverloadedName = "vfncvt_x" in
defm :
- RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>;
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
let OverloadedName = "vfncvt_xu" in
defm :
- RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>;
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["Zvfh"] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ }
let OverloadedName = "vfncvt_f" in {
defm :
RVVConvBuiltinSet<"vfncvt_f_x_w", "xf", [["v", "vIw"]]>;
@@ -2256,10 +2298,22 @@ defm vfslide1down : RVVFloatingBinVFBuiltinSet;
// 16.4. Vector Register Gather Instructions
// signed and floating type
-defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd",
[["vv", "v", "vvUv"]]>;
-defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd",
[["vx", "v", "vvz"]]>;
+let RequiredFeatures = ["Zvfhmin"] in {
+ defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "x",
+ [["vv", "v", "vvUv"]]>;
+ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "x",
+ [["vx", "v", "vvz"]]>;
+}
+let RequiredFeatures = ["Zvfbfmin"] in {
+ defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "y",
+ [["vv", "v", "vvUv"]]>;
+ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "y",
+ [["vx", "v", "vvz"]]>;
+}
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
[["vv", "v", "vv(Log2EEW:4)Uv"]]>;
// unsigned type
@@ -2282,8 +2336,14 @@ let HasMasked = false,
IntrinsicTypes = {ResultType, Ops.back()->getType()};
}] in {
// signed and floating type
- defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
+ defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd",
[["vm", "v", "vvm"]]>;
+ let RequiredFeatures = ["Zvfhmin"] in
+ defm vcompress : RVVOutBuiltinSet<"vcompress", "x",
+ [["vm", "v", "vvm"]]>;
+ let RequiredFeatures = ["Zvfbfmin"] in
+ defm vcompress : RVVOutBuiltinSet<"vcompress", "y",
+ [["vm", "v", "vvm"]]>;
// unsigned type
defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
[["vm", "Uv", "UvUvm"]]>;
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 040db6f0..33f6441 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -604,10 +604,10 @@ class RVVConvToWidenUnsignedBuiltin<string overloaded_name>
: RVVConvBuiltin<"Uw", "Uwv", "xf", overloaded_name>;
class RVVConvToNarrowingSignedBuiltin<string overloaded_name>
- : RVVConvBuiltin<"Iv", "IvFw", "csi", overloaded_name>;
+ : RVVConvBuiltin<"Iv", "IvFw", "si", overloaded_name>;
class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
- : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
+ : RVVConvBuiltin<"Uv", "UvFw", "si", overloaded_name>;
let HasMaskedOffOperand = true in {
multiclass RVVSignedReductionBuiltin {
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 8471b89d..51ec29f 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -3105,7 +3105,7 @@ def fmodules_user_build_path : Separate<["-"], "fmodules-user-build-path">, Grou
HelpText<"Specify the module user build path">,
MarshallingInfoString<HeaderSearchOpts<"ModuleUserBuildPath">>;
def fprebuilt_module_path : Joined<["-"], "fprebuilt-module-path=">, Group<i_Group>,
- Flags<[]>, Visibility<[ClangOption, CC1Option]>,
+ Flags<[]>, Visibility<[ClangOption, CLOption, CC1Option]>,
MetaVarName<"<directory>">,
HelpText<"Specify the prebuilt module path">;
defm prebuilt_implicit_modules : BoolFOption<"prebuilt-implicit-modules",
@@ -3114,11 +3114,11 @@ defm prebuilt_implicit_modules : BoolFOption<"prebuilt-implicit-modules",
NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
def fmodule_output_EQ : Joined<["-"], "fmodule-output=">,
- Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option]>,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, CC1Option]>,
MarshallingInfoString<FrontendOpts<"ModuleOutputPath">>,
HelpText<"Save intermediate module file results when compiling a standard C++ module unit.">;
def fmodule_output : Flag<["-"], "fmodule-output">, Flags<[NoXarchOption]>,
- Visibility<[ClangOption, CC1Option]>,
+ Visibility<[ClangOption, CLOption, CC1Option]>,
HelpText<"Save intermediate module file results when compiling a standard C++ module unit.">;
defm skip_odr_check_in_gmf : BoolOption<"f", "skip-odr-check-in-gmf",
@@ -3302,8 +3302,10 @@ def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-sy
Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<LangOpts<"RetainCommentsFromSystemHeaders">>;
def fmodule_header : Flag <["-"], "fmodule-header">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Build a C++20 Header Unit from a header">;
def fmodule_header_EQ : Joined<["-"], "fmodule-header=">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>,
MetaVarName<"<kind>">,
HelpText<"Build a C++20 Header Unit from a header that should be found in the user (fmodule-header=user) or system (fmodule-header=system) search path.">;
@@ -4254,11 +4256,13 @@ defm ptrauth_vtable_pointer_type_discrimination :
OptInCC1FFlag<"ptrauth-vtable-pointer-type-discrimination", "Enable type discrimination of vtable pointers">;
defm ptrauth_type_info_vtable_pointer_discrimination :
OptInCC1FFlag<"ptrauth-type-info-vtable-pointer-discrimination", "Enable type and address discrimination of vtable pointer of std::type_info">;
-defm ptrauth_init_fini : OptInCC1FFlag<"ptrauth-init-fini", "Enable signing of function pointers in init/fini arrays">;
defm ptrauth_function_pointer_type_discrimination : OptInCC1FFlag<"ptrauth-function-pointer-type-discrimination",
"Enable type discrimination on C function pointers">;
defm ptrauth_indirect_gotos : OptInCC1FFlag<"ptrauth-indirect-gotos",
"Enable signing and authentication of indirect goto targets">;
+defm ptrauth_init_fini : OptInCC1FFlag<"ptrauth-init-fini", "Enable signing of function pointers in init/fini arrays">;
+defm ptrauth_init_fini_address_discrimination : OptInCC1FFlag<"ptrauth-init-fini-address-discrimination",
+ "Enable address discrimination of function pointers in init/fini arrays">;
}
def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
@@ -5503,7 +5507,7 @@ def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_te
def nobuiltininc : Flag<["-"], "nobuiltininc">,
Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
Group<IncludePath_Group>,
- HelpText<"Disable builtin #include directories">,
+ HelpText<"Disable builtin #include directories only">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseBuiltinIncludes">>;
def nogpuinc : Flag<["-"], "nogpuinc">, Group<IncludePath_Group>,
HelpText<"Do not add include paths for CUDA/HIP and"
@@ -5530,8 +5534,10 @@ def noprofilelib : Flag<["-"], "noprofilelib">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">, Group<Link_Group>;
def nostdinc : Flag<["-"], "nostdinc">,
- Visibility<[ClangOption, CLOption, DXCOption]>, Group<IncludePath_Group>;
-def nostdlibinc : Flag<["-"], "nostdlibinc">, Group<IncludePath_Group>;
+ Visibility<[ClangOption, CLOption, DXCOption]>, Group<IncludePath_Group>,
+ HelpText<"Disable both standard system #include directories and builtin #include directores">;
+def nostdlibinc : Flag<["-"], "nostdlibinc">, Group<IncludePath_Group>,
+ HelpText<"Disable standard system #include directories only">;
def nostdincxx : Flag<["-"], "nostdinc++">, Visibility<[ClangOption, CC1Option]>,
Group<IncludePath_Group>,
HelpText<"Disable standard #include directories for the C++ standard library">,
@@ -5953,6 +5959,7 @@ def _output : Separate<["--"], "output">, Alias<o>;
def _param : Separate<["--"], "param">, Group<CompileOnly_Group>;
def _param_EQ : Joined<["--"], "param=">, Alias<_param>;
def _precompile : Flag<["--"], "precompile">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
Group<Action_Group>, HelpText<"Only precompile the input">;
def _prefix_EQ : Joined<["--"], "prefix=">, Alias<B>;
def _prefix : Separate<["--"], "prefix">, Alias<B>;
diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h
index 623f868..1307659 100644
--- a/clang/include/clang/Lex/Preprocessor.h
+++ b/clang/include/clang/Lex/Preprocessor.h
@@ -2139,8 +2139,8 @@ public:
}
/// Given a Token \p Tok that is a numeric constant with length 1,
- /// return the character.
- char
+ /// return the value of constant as an unsigned 8-bit integer.
+ uint8_t
getSpellingOfSingleCharacterNumericConstant(const Token &Tok,
bool *Invalid = nullptr) const {
assert((Tok.is(tok::numeric_constant) || Tok.is(tok::binary_data)) &&
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 2ec6367..b7bd6c2 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -15071,9 +15071,6 @@ public:
///
/// \param FD The FieldDecl to apply the attribute to
/// \param E The count expression on the attribute
- /// \param[out] Decls If the attribute is semantically valid \p Decls
- /// is populated with TypeCoupledDeclRefInfo objects, each
- /// describing Decls referred to in \p E.
/// \param CountInBytes If true the attribute is from the "sized_by" family of
/// attributes. If the false the attribute is from
/// "counted_by" family of attributes.
@@ -15086,10 +15083,8 @@ public:
/// `counted_by_or_null` attribute.
///
/// \returns false iff semantically valid.
- bool CheckCountedByAttrOnField(
- FieldDecl *FD, Expr *E,
- llvm::SmallVectorImpl<TypeCoupledDeclRefInfo> &Decls, bool CountInBytes,
- bool OrNull);
+ bool CheckCountedByAttrOnField(FieldDecl *FD, Expr *E, bool CountInBytes,
+ bool OrNull);
///@}
};
diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h
index e23c0cd..0ceb5fc 100644
--- a/clang/include/clang/Sema/SemaOpenMP.h
+++ b/clang/include/clang/Sema/SemaOpenMP.h
@@ -1259,7 +1259,8 @@ public:
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = std::nullopt);
/// Called on well-formed 'num_teams' clause.
- OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
+ OMPClause *ActOnOpenMPNumTeamsClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 0ef353b..0825ecb 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -45,14 +45,8 @@ class FunctionSummariesTy;
class ExprEngine;
//===----------------------------------------------------------------------===//
-/// CoreEngine - Implements the core logic of the graph-reachability
-/// analysis. It traverses the CFG and generates the ExplodedGraph.
-/// Program "states" are treated as opaque void pointers.
-/// The template class CoreEngine (which subclasses CoreEngine)
-/// provides the matching component to the engine that knows the actual types
-/// for states. Note that this engine only dispatches to transfer functions
-/// at the statement and block-level. The analyses themselves must implement
-/// any transfer function logic and the sub-expression level (if any).
+/// CoreEngine - Implements the core logic of the graph-reachability analysis.
+/// It traverses the CFG and generates the ExplodedGraph.
class CoreEngine {
friend class CommonNodeBuilder;
friend class EndOfFunctionNodeBuilder;
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index b4ff617..9a6a209 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -502,7 +502,8 @@ enum RVVRequire : uint32_t {
RVV_REQ_Zvksh = 1 << 15,
RVV_REQ_Zvfbfwma = 1 << 16,
RVV_REQ_Zvfbfmin = 1 << 17,
- RVV_REQ_Experimental = 1 << 18,
+ RVV_REQ_Zvfh = 1 << 18,
+ RVV_REQ_Experimental = 1 << 19,
LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Experimental)
};
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index c835b72..198bc34 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -3640,6 +3640,10 @@ public:
return {};
}
+ std::optional<bool> VisitUnaryTransformType(const UnaryTransformType *T) {
+ return CheckType(T->getBaseType());
+ }
+
std::optional<bool>
VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
// The "associated declaration" can be the same as ParentDC.
@@ -3717,13 +3721,10 @@ bool ASTNodeImporter::hasReturnTypeDeclaredInside(FunctionDecl *D) {
const auto *FromFPT = FromTy->getAs<FunctionProtoType>();
assert(FromFPT && "Must be called on FunctionProtoType");
- auto IsCXX11LambdaWithouTrailingReturn = [&]() {
+ auto IsCXX11Lambda = [&]() {
if (Importer.FromContext.getLangOpts().CPlusPlus14) // C++14 or later
return false;
- if (FromFPT->hasTrailingReturn())
- return false;
-
if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
return cast<CXXRecordDecl>(MD->getDeclContext())->isLambda();
@@ -3731,7 +3732,7 @@ bool ASTNodeImporter::hasReturnTypeDeclaredInside(FunctionDecl *D) {
};
QualType RetT = FromFPT->getReturnType();
- if (isa<AutoType>(RetT.getTypePtr()) || IsCXX11LambdaWithouTrailingReturn()) {
+ if (isa<AutoType>(RetT.getTypePtr()) || IsCXX11Lambda()) {
FunctionDecl *Def = D->getDefinition();
IsTypeDeclaredInsideVisitor Visitor(Def ? Def : D);
return Visitor.CheckType(RetT);
@@ -8633,13 +8634,15 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
return UnresolvedLookupExpr::Create(
Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
*ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo,
- ToDecls.begin(), ToDecls.end(), KnownDependent);
+ ToDecls.begin(), ToDecls.end(), KnownDependent,
+ /*KnownInstantiationDependent=*/E->isInstantiationDependent());
}
return UnresolvedLookupExpr::Create(
Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
ToNameInfo, E->requiresADL(), ToDecls.begin(), ToDecls.end(),
- /*KnownDependent=*/E->isTypeDependent());
+ /*KnownDependent=*/E->isTypeDependent(),
+ /*KnownInstantiationDependent=*/E->isInstantiationDependent());
}
ExpectedStmt
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 6212989..83ce404 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -402,10 +402,11 @@ UnresolvedLookupExpr::UnresolvedLookupExpr(
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End, bool KnownDependent)
+ UnresolvedSetIterator End, bool KnownDependent,
+ bool KnownInstantiationDependent)
: OverloadExpr(UnresolvedLookupExprClass, Context, QualifierLoc,
TemplateKWLoc, NameInfo, TemplateArgs, Begin, End,
- KnownDependent, false, false),
+ KnownDependent, KnownInstantiationDependent, false),
NamingClass(NamingClass) {
UnresolvedLookupExprBits.RequiresADL = RequiresADL;
}
@@ -420,7 +421,7 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
const ASTContext &Context, CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
bool RequiresADL, UnresolvedSetIterator Begin, UnresolvedSetIterator End,
- bool KnownDependent) {
+ bool KnownDependent, bool KnownInstantiationDependent) {
unsigned NumResults = End - Begin;
unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
TemplateArgumentLoc>(NumResults, 0, 0);
@@ -428,7 +429,8 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
return new (Mem) UnresolvedLookupExpr(
Context, NamingClass, QualifierLoc,
/*TemplateKWLoc=*/SourceLocation(), NameInfo, RequiresADL,
- /*TemplateArgs=*/nullptr, Begin, End, KnownDependent);
+ /*TemplateArgs=*/nullptr, Begin, End, KnownDependent,
+ KnownInstantiationDependent);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
@@ -436,7 +438,8 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End, bool KnownDependent) {
+ UnresolvedSetIterator End, bool KnownDependent,
+ bool KnownInstantiationDependent) {
unsigned NumResults = End - Begin;
bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid();
unsigned NumTemplateArgs = Args ? Args->size() : 0;
@@ -444,9 +447,9 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
TemplateArgumentLoc>(
NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
- return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
- TemplateKWLoc, NameInfo, RequiresADL,
- Args, Begin, End, KnownDependent);
+ return new (Mem) UnresolvedLookupExpr(
+ Context, NamingClass, QualifierLoc, TemplateKWLoc, NameInfo, RequiresADL,
+ Args, Begin, End, KnownDependent, KnownInstantiationDependent);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::CreateEmpty(
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index d4b9095..4d2d053 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -222,6 +222,11 @@ namespace {
ArraySize = 2;
MostDerivedLength = I + 1;
IsArray = true;
+ } else if (const auto *VT = Type->getAs<VectorType>()) {
+ Type = VT->getElementType();
+ ArraySize = VT->getNumElements();
+ MostDerivedLength = I + 1;
+ IsArray = true;
} else if (const FieldDecl *FD = getAsField(Path[I])) {
Type = FD->getType();
ArraySize = 0;
@@ -268,7 +273,6 @@ namespace {
/// If the current array is an unsized array, the value of this is
/// undefined.
uint64_t MostDerivedArraySize;
-
/// The type of the most derived object referred to by this address.
QualType MostDerivedType;
@@ -442,6 +446,16 @@ namespace {
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
+
+ void addVectorElementUnchecked(QualType EltTy, uint64_t Size,
+ uint64_t Idx) {
+ Entries.push_back(PathEntry::ArrayIndex(Idx));
+ MostDerivedType = EltTy;
+ MostDerivedPathLength = Entries.size();
+ MostDerivedArraySize = 0;
+ MostDerivedIsArrayElement = false;
+ }
+
void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &N);
@@ -1737,6 +1751,11 @@ namespace {
if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
Designator.addComplexUnchecked(EltTy, Imag);
}
+ void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy,
+ uint64_t Size, uint64_t Idx) {
+ if (checkSubobject(Info, E, CSK_VectorElement))
+ Designator.addVectorElementUnchecked(EltTy, Size, Idx);
+ }
void clearIsNullPointer() {
IsNullPtr = false;
}
@@ -3310,6 +3329,19 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
return true;
}
+static bool HandleLValueVectorElement(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ uint64_t Size, uint64_t Idx) {
+ if (Idx) {
+ CharUnits SizeOfElement;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfElement))
+ return false;
+ LVal.Offset += SizeOfElement * Idx;
+ }
+ LVal.addVectorElement(Info, E, EltTy, Size, Idx);
+ return true;
+}
+
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
@@ -3855,6 +3887,27 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
return handler.found(Index ? O->getComplexFloatImag()
: O->getComplexFloatReal(), ObjType);
}
+ } else if (const auto *VT = ObjType->getAs<VectorType>()) {
+ uint64_t Index = Sub.Entries[I].getAsArrayIndex();
+ unsigned NumElements = VT->getNumElements();
+ if (Index == NumElements) {
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.FFDiag(E, diag::note_constexpr_access_past_end)
+ << handler.AccessKind;
+ else
+ Info.FFDiag(E);
+ return handler.failed();
+ }
+
+ if (Index > NumElements) {
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << Index << /*array*/ 0 << NumElements;
+ return handler.failed();
+ }
+
+ ObjType = VT->getElementType();
+ assert(I == N - 1 && "extracting subobject of scalar?");
+ return handler.found(O->getVectorElt(Index), ObjType);
} else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
if (Field->isMutable() &&
!Obj.mayAccessMutableMembers(Info, handler.AccessKind)) {
@@ -8509,6 +8562,7 @@ public:
bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E);
bool VisitUnaryDeref(const UnaryOperator *E);
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
@@ -8850,15 +8904,63 @@ bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
}
+bool LValueExprEvaluator::VisitExtVectorElementExpr(
+ const ExtVectorElementExpr *E) {
+ bool Success = true;
+
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase())) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
+
+ SmallVector<uint32_t, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+ // FIXME: support accessing more than one element
+ if (Indices.size() > 1)
+ return false;
+
+ if (Success) {
+ Result.setFrom(Info.Ctx, Val);
+ const auto *VT = E->getBase()->getType()->castAs<VectorType>();
+ HandleLValueVectorElement(Info, E, Result, VT->getElementType(),
+ VT->getNumElements(), Indices[0]);
+ }
+
+ return Success;
+}
+
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
- // FIXME: Deal with vectors as array subscript bases.
- if (E->getBase()->getType()->isVectorType() ||
- E->getBase()->getType()->isSveVLSBuiltinType())
+ if (E->getBase()->getType()->isSveVLSBuiltinType())
return Error(E);
APSInt Index;
bool Success = true;
+ if (const auto *VT = E->getBase()->getType()->getAs<VectorType>()) {
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase())) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
+
+ if (!EvaluateInteger(E->getIdx(), Index, Info)) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
+
+ if (Success) {
+ Result.setFrom(Info.Ctx, Val);
+ HandleLValueVectorElement(Info, E, Result, VT->getElementType(),
+ VT->getNumElements(), Index.getExtValue());
+ }
+
+ return Success;
+ }
+
// C++17's rules require us to evaluate the LHS first, regardless of which
// side is the base.
for (const Expr *SubExpr : {E->getLHS(), E->getRHS()}) {
diff --git a/clang/lib/AST/Interp/Compiler.cpp b/clang/lib/AST/Interp/Compiler.cpp
index e528049..11fe2ac 100644
--- a/clang/lib/AST/Interp/Compiler.cpp
+++ b/clang/lib/AST/Interp/Compiler.cpp
@@ -335,6 +335,10 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
if (!PointeeType.isNull()) {
if (std::optional<PrimType> T = classify(PointeeType))
Desc = P.createDescriptor(SubExpr, *T);
+ else
+ Desc = P.createDescriptor(SubExpr, PointeeType.getTypePtr(),
+ std::nullopt, true, false,
+ /*IsMutable=*/false, nullptr);
}
return this->emitNull(classifyPrim(CE->getType()), Desc, CE);
}
@@ -476,19 +480,25 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
}
}
+ auto maybeNegate = [&]() -> bool {
+ if (CE->getCastKind() == CK_BooleanToSignedIntegral)
+ return this->emitNeg(*ToT, CE);
+ return true;
+ };
+
if (ToT == PT_IntAP)
- return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+ return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE) &&
+ maybeNegate();
if (ToT == PT_IntAPS)
- return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+ return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE) &&
+ maybeNegate();
if (FromT == ToT)
return true;
if (!this->emitCast(*FromT, *ToT, CE))
return false;
- if (CE->getCastKind() == CK_BooleanToSignedIntegral)
- return this->emitNeg(*ToT, CE);
- return true;
+ return maybeNegate();
}
case CK_PointerToBoolean:
@@ -3157,10 +3167,11 @@ bool Compiler<Emitter>::VisitExtVectorElementExpr(
template <class Emitter>
bool Compiler<Emitter>::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
if (!E->isExpressibleAsConstantInitializer())
- return this->emitInvalid(E);
+ return this->discard(SubExpr) && this->emitInvalid(E);
- return this->delegate(E->getSubExpr());
+ return this->delegate(SubExpr);
}
template <class Emitter>
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 0252dd0..0f72b86 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -628,7 +628,12 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1)
<< DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
- S.Note(DiagDecl->getLocation(), diag::note_declared_at);
+
+ if (DiagDecl->getDefinition())
+ S.Note(DiagDecl->getDefinition()->getLocation(),
+ diag::note_declared_at);
+ else
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
}
} else {
S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index 04f88ef..2eed0d3 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -1504,6 +1504,12 @@ inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize())
return false;
+
+ if (Ptr.isIntegralPointer()) {
+ S.Stk.push<Pointer>(Ptr.asIntPointer().atOffset(S.getCtx(), Off));
+ return true;
+ }
+
S.Stk.push<Pointer>(Ptr.atField(Off));
return true;
}
@@ -1527,6 +1533,11 @@ inline bool GetPtrFieldPop(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize())
return false;
+ if (Ptr.isIntegralPointer()) {
+ S.Stk.push<Pointer>(Ptr.asIntPointer().atOffset(S.getCtx(), Off));
+ return true;
+ }
+
S.Stk.push<Pointer>(Ptr.atField(Off));
return true;
}
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index 2b1f8b4..ba9683a 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -597,3 +597,30 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
return std::nullopt;
return Result;
}
+
+IntPointer IntPointer::atOffset(const ASTContext &ASTCtx,
+ unsigned Offset) const {
+ if (!this->Desc)
+ return *this;
+ const Record *R = this->Desc->ElemRecord;
+ if (!R)
+ return *this;
+
+ const Record::Field *F = nullptr;
+ for (auto &It : R->fields()) {
+ if (It.Offset == Offset) {
+ F = &It;
+ break;
+ }
+ }
+ if (!F)
+ return *this;
+
+ const FieldDecl *FD = F->Decl;
+ const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(FD->getParent());
+ unsigned FieldIndex = FD->getFieldIndex();
+ uint64_t FieldOffset =
+ ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
+ .getQuantity();
+ return IntPointer{this->Desc, FieldOffset};
+}
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index 6f69834..b7b4f82 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -44,6 +44,8 @@ struct BlockPointer {
struct IntPointer {
const Descriptor *Desc;
uint64_t Value;
+
+ IntPointer atOffset(const ASTContext &ASTCtx, unsigned Offset) const;
};
enum class Storage { Block, Int, Fn };
@@ -88,6 +90,9 @@ public:
PointeeStorage.Int.Value = 0;
PointeeStorage.Int.Desc = nullptr;
}
+ Pointer(IntPointer &&IntPtr) : StorageKind(Storage::Int) {
+ PointeeStorage.Int = std::move(IntPtr);
+ }
Pointer(Block *B);
Pointer(Block *B, uint64_t BaseAndOffset);
Pointer(const Pointer &P);
@@ -161,9 +166,8 @@ public:
/// Creates a pointer to a field.
[[nodiscard]] Pointer atField(unsigned Off) const {
+ assert(isBlockPointer());
unsigned Field = Offset + Off;
- if (isIntegralPointer())
- return Pointer(asIntPointer().Value + Field, asIntPointer().Desc);
return Pointer(asBlockPointer().Pointee, Field, Field);
}
diff --git a/clang/lib/AST/Interp/State.h b/clang/lib/AST/Interp/State.h
index f1e8e36..44d6c03 100644
--- a/clang/lib/AST/Interp/State.h
+++ b/clang/lib/AST/Interp/State.h
@@ -44,7 +44,8 @@ enum CheckSubobjectKind {
CSK_ArrayToPointer,
CSK_ArrayIndex,
CSK_Real,
- CSK_Imag
+ CSK_Imag,
+ CSK_VectorElement
};
namespace interp {
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index ae4fc11..6bdc86f 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -1755,6 +1755,24 @@ OMPContainsClause *OMPContainsClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPContainsClause(K);
}
+OMPNumTeamsClause *OMPNumTeamsClause::Create(
+ const ASTContext &C, OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, Stmt *PreInit) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ OMPNumTeamsClause *Clause =
+ new (Mem) OMPNumTeamsClause(C, StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPreInitStmt(PreInit, CaptureRegion);
+ return Clause;
+}
+
+OMPNumTeamsClause *OMPNumTeamsClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPNumTeamsClause(N);
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
@@ -2055,9 +2073,11 @@ void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
}
void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
- OS << "num_teams(";
- Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
+ if (!Node->varlist_empty()) {
+ OS << "num_teams";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
}
void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 09562aa..bf46984 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -857,9 +857,8 @@ void OMPClauseProfiler::VisitOMPAllocateClause(const OMPAllocateClause *C) {
VisitOMPClauseList(C);
}
void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
+ VisitOMPClauseList(C);
VistOMPClauseWithPreInit(C);
- if (C->getNumTeams())
- Profiler->VisitStmt(C->getNumTeams());
}
void OMPClauseProfiler::VisitOMPThreadLimitClause(
const OMPThreadLimitClause *C) {
diff --git a/clang/lib/Basic/Attributes.cpp b/clang/lib/Basic/Attributes.cpp
index a39eb85..867d241 100644
--- a/clang/lib/Basic/Attributes.cpp
+++ b/clang/lib/Basic/Attributes.cpp
@@ -153,40 +153,6 @@ std::string AttributeCommonInfo::getNormalizedFullName() const {
normalizeName(getAttrName(), getScopeName(), getSyntax()));
}
-static StringRef getSyntaxName(AttributeCommonInfo::Syntax SyntaxUsed) {
- switch (SyntaxUsed) {
- case AttributeCommonInfo::AS_GNU:
- return "GNU";
- case AttributeCommonInfo::AS_CXX11:
- return "CXX11";
- case AttributeCommonInfo::AS_C23:
- return "C23";
- case AttributeCommonInfo::AS_Declspec:
- return "Declspec";
- case AttributeCommonInfo::AS_Microsoft:
- return "Microsoft";
- case AttributeCommonInfo::AS_Keyword:
- return "Keyword";
- case AttributeCommonInfo::AS_Pragma:
- return "Pragma";
- case AttributeCommonInfo::AS_ContextSensitiveKeyword:
- return "ContextSensitiveKeyword";
- case AttributeCommonInfo::AS_HLSLAnnotation:
- return "HLSLAnnotation";
- case AttributeCommonInfo::AS_Implicit:
- return "Implicit";
- }
- llvm_unreachable("Invalid attribute syntax");
-}
-
-std::string AttributeCommonInfo::normalizeFullNameWithSyntax(
- const IdentifierInfo *Name, const IdentifierInfo *ScopeName,
- Syntax SyntaxUsed) {
- return (Twine(getSyntaxName(SyntaxUsed)) +
- "::" + normalizeName(Name, ScopeName, SyntaxUsed))
- .str();
-}
-
unsigned AttributeCommonInfo::calculateAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt
index f306805..e7ebc8f 100644
--- a/clang/lib/Basic/CMakeLists.txt
+++ b/clang/lib/Basic/CMakeLists.txt
@@ -102,7 +102,6 @@ add_clang_library(clangBasic
Targets/DirectX.cpp
Targets/Hexagon.cpp
Targets/Lanai.cpp
- Targets/Le64.cpp
Targets/LoongArch.cpp
Targets/M68k.cpp
Targets/MSP430.cpp
diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp
index 4f7ccaf..9cf081e 100644
--- a/clang/lib/Basic/IdentifierTable.cpp
+++ b/clang/lib/Basic/IdentifierTable.cpp
@@ -81,49 +81,49 @@ IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
// Constants for TokenKinds.def
namespace {
- enum TokenKey : unsigned {
- KEYC99 = 0x1,
- KEYCXX = 0x2,
- KEYCXX11 = 0x4,
- KEYGNU = 0x8,
- KEYMS = 0x10,
- BOOLSUPPORT = 0x20,
- KEYALTIVEC = 0x40,
- KEYNOCXX = 0x80,
- KEYBORLAND = 0x100,
- KEYOPENCLC = 0x200,
- KEYC23 = 0x400,
- KEYNOMS18 = 0x800,
- KEYNOOPENCL = 0x1000,
- WCHARSUPPORT = 0x2000,
- HALFSUPPORT = 0x4000,
- CHAR8SUPPORT = 0x8000,
- KEYOBJC = 0x10000,
- KEYZVECTOR = 0x20000,
- KEYCOROUTINES = 0x40000,
- KEYMODULES = 0x80000,
- KEYCXX20 = 0x100000,
- KEYOPENCLCXX = 0x200000,
- KEYMSCOMPAT = 0x400000,
- KEYSYCL = 0x800000,
- KEYCUDA = 0x1000000,
- KEYHLSL = 0x2000000,
- KEYFIXEDPOINT = 0x4000000,
- KEYMAX = KEYFIXEDPOINT, // The maximum key
- KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
- KEYALL = (KEYMAX | (KEYMAX-1)) & ~KEYNOMS18 &
- ~KEYNOOPENCL // KEYNOMS18 and KEYNOOPENCL are used to exclude.
- };
-
- /// How a keyword is treated in the selected standard. This enum is ordered
- /// intentionally so that the value that 'wins' is the most 'permissive'.
- enum KeywordStatus {
- KS_Unknown, // Not yet calculated. Used when figuring out the status.
- KS_Disabled, // Disabled
- KS_Future, // Is a keyword in future standard
- KS_Extension, // Is an extension
- KS_Enabled, // Enabled
- };
+enum TokenKey : unsigned {
+ KEYC99 = 0x1,
+ KEYCXX = 0x2,
+ KEYCXX11 = 0x4,
+ KEYGNU = 0x8,
+ KEYMS = 0x10,
+ BOOLSUPPORT = 0x20,
+ KEYALTIVEC = 0x40,
+ KEYNOCXX = 0x80,
+ KEYBORLAND = 0x100,
+ KEYOPENCLC = 0x200,
+ KEYC23 = 0x400,
+ KEYNOMS18 = 0x800,
+ KEYNOOPENCL = 0x1000,
+ WCHARSUPPORT = 0x2000,
+ HALFSUPPORT = 0x4000,
+ CHAR8SUPPORT = 0x8000,
+ KEYOBJC = 0x10000,
+ KEYZVECTOR = 0x20000,
+ KEYCOROUTINES = 0x40000,
+ KEYMODULES = 0x80000,
+ KEYCXX20 = 0x100000,
+ KEYOPENCLCXX = 0x200000,
+ KEYMSCOMPAT = 0x400000,
+ KEYSYCL = 0x800000,
+ KEYCUDA = 0x1000000,
+ KEYHLSL = 0x2000000,
+ KEYFIXEDPOINT = 0x4000000,
+ KEYMAX = KEYFIXEDPOINT, // The maximum key
+ KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
+ KEYALL = (KEYMAX | (KEYMAX - 1)) & ~KEYNOMS18 &
+ ~KEYNOOPENCL // KEYNOMS18 and KEYNOOPENCL are used to exclude.
+};
+
+/// How a keyword is treated in the selected standard. This enum is ordered
+/// intentionally so that the value that 'wins' is the most 'permissive'.
+enum KeywordStatus {
+ KS_Unknown, // Not yet calculated. Used when figuring out the status.
+ KS_Disabled, // Disabled
+ KS_Future, // Is a keyword in future standard
+ KS_Extension, // Is an extension
+ KS_Enabled, // Enabled
+};
} // namespace
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index 29133f9..0b8e565 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -23,7 +23,6 @@
#include "Targets/DirectX.h"
#include "Targets/Hexagon.h"
#include "Targets/Lanai.h"
-#include "Targets/Le64.h"
#include "Targets/LoongArch.h"
#include "Targets/M68k.h"
#include "Targets/MSP430.h"
@@ -344,17 +343,6 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
return std::make_unique<M68kTargetInfo>(Triple, Opts);
}
- case llvm::Triple::le32:
- switch (os) {
- case llvm::Triple::NaCl:
- return std::make_unique<NaClTargetInfo<PNaClTargetInfo>>(Triple, Opts);
- default:
- return nullptr;
- }
-
- case llvm::Triple::le64:
- return std::make_unique<Le64TargetInfo>(Triple, Opts);
-
case llvm::Triple::ppc:
switch (os) {
case llvm::Triple::Linux:
diff --git a/clang/lib/Basic/Targets/Le64.cpp b/clang/lib/Basic/Targets/Le64.cpp
deleted file mode 100644
index f7afa0e..0000000
--- a/clang/lib/Basic/Targets/Le64.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//===--- Le64.cpp - Implement Le64 target feature support -----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements Le64 TargetInfo objects.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Le64.h"
-#include "Targets.h"
-#include "clang/Basic/Builtins.h"
-#include "clang/Basic/MacroBuilder.h"
-#include "clang/Basic/TargetBuiltins.h"
-
-using namespace clang;
-using namespace clang::targets;
-
-ArrayRef<Builtin::Info> Le64TargetInfo::getTargetBuiltins() const {
- return {};
-}
-
-void Le64TargetInfo::getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- DefineStd(Builder, "unix", Opts);
- defineCPUMacros(Builder, "le64", /*Tuning=*/false);
-}
diff --git a/clang/lib/Basic/Targets/Le64.h b/clang/lib/Basic/Targets/Le64.h
deleted file mode 100644
index 45f6a4e..0000000
--- a/clang/lib/Basic/Targets/Le64.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//===--- Le64.h - Declare Le64 target feature support -----------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares Le64 TargetInfo objects.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_LE64_H
-#define LLVM_CLANG_LIB_BASIC_TARGETS_LE64_H
-
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/TargetOptions.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/TargetParser/Triple.h"
-
-namespace clang {
-namespace targets {
-
-class LLVM_LIBRARY_VISIBILITY Le64TargetInfo : public TargetInfo {
-
-public:
- Le64TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple) {
- NoAsmVariants = true;
- LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
- resetDataLayout("e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128");
- }
-
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override;
-
- ArrayRef<Builtin::Info> getTargetBuiltins() const override;
-
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::PNaClABIBuiltinVaList;
- }
-
- std::string_view getClobbers() const override { return ""; }
-
- ArrayRef<const char *> getGCCRegNames() const override {
- return std::nullopt;
- }
-
- ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return std::nullopt;
- }
-
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const override {
- return false;
- }
-
- bool hasProtectedVisibility() const override { return false; }
-};
-
-} // namespace targets
-} // namespace clang
-#endif // LLVM_CLANG_LIB_BASIC_TARGETS_LE64_H
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index 5f27c34..0a4f069 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -841,9 +841,6 @@ public:
"i64:64-i128:128-n8:16:32:64-S128");
} else if (Triple.getArch() == llvm::Triple::mipsel) {
// Handled on mips' setDataLayout.
- } else {
- assert(Triple.getArch() == llvm::Triple::le32);
- this->resetDataLayout("e-p:32:32-i64:64");
}
}
};
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index d8203f7..b5f9adf 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -105,6 +105,9 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
}
static void defineXLCompatMacros(MacroBuilder &Builder) {
+ Builder.defineMacro("__cdtbcd", "__builtin_ppc_cdtbcd");
+ Builder.defineMacro("__cbcdtd", "__builtin_ppc_cbcdtd");
+ Builder.defineMacro("__addg6s", "__builtin_ppc_addg6s");
Builder.defineMacro("__popcntb", "__builtin_ppc_popcntb");
Builder.defineMacro("__poppar4", "__builtin_ppc_poppar4");
Builder.defineMacro("__poppar8", "__builtin_ppc_poppar8");
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index de04b79..fc8fcae 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -6036,8 +6036,9 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
- const Expr *NumTeams =
- NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
+ const Expr *NumTeams = NestedDir->getSingleClause<OMPNumTeamsClause>()
+ ->getNumTeams()
+ .front();
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant =
NumTeams->getIntegerConstantExpr(CGF.getContext()))
@@ -6062,7 +6063,7 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
case OMPD_target_teams_distribute_parallel_for_simd: {
if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
const Expr *NumTeams =
- D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
+ D.getSingleClause<OMPNumTeamsClause>()->getNumTeams().front();
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
MinTeamsVal = MaxTeamsVal = Constant->getExtValue();
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 96012af..6841ceb 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -6859,7 +6859,7 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
if (NT || TL) {
- const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
+ const Expr *NumTeams = NT ? NT->getNumTeams().front() : nullptr;
const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 7f729d3..def8c4a 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -1013,7 +1013,7 @@ void CodeGenVTables::RemoveHwasanMetadata(llvm::GlobalValue *GV) const {
// the VTable does not need a relocation and move into rodata. A frequent
// time this can occur is for classes that should be made public from a DSO
// (like in libc++). For cases like these, we can make the vtable hidden or
-// private and create a public alias with the same visibility and linkage as
+// internal and create a public alias with the same visibility and linkage as
// the original vtable type.
void CodeGenVTables::GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
llvm::StringRef AliasNameRef) {
@@ -1050,15 +1050,18 @@ void CodeGenVTables::GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
VTableAlias->setVisibility(VTable->getVisibility());
VTableAlias->setUnnamedAddr(VTable->getUnnamedAddr());
- // Both of these imply dso_local for the vtable.
+ // Both of these will now imply dso_local for the vtable.
if (!VTable->hasComdat()) {
- // If this is in a comdat, then we shouldn't make the linkage private due to
- // an issue in lld where private symbols can be used as the key symbol when
- // choosing the prevelant group. This leads to "relocation refers to a
- // symbol in a discarded section".
- VTable->setLinkage(llvm::GlobalValue::PrivateLinkage);
+ VTable->setLinkage(llvm::GlobalValue::InternalLinkage);
} else {
- // We should at least make this hidden since we don't want to expose it.
+ // If a relocation targets an internal linkage symbol, MC will generate the
+ // relocation against the symbol's section instead of the symbol itself
+ // (see ELFObjectWriter::shouldRelocateWithSymbol). If an internal symbol is
+ // in a COMDAT section group, that section might be discarded, and then the
+ // relocation to that section will generate a linker error. We therefore
+ // make COMDAT vtables hidden instead of internal: they'll still not be
+ // public, but relocations will reference the symbol instead of the section
+ // and COMDAT deduplication will thus work as expected.
VTable->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 31f6632..9aaf90c 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -116,8 +116,6 @@ createTargetCodeGenInfo(CodeGenModule &CGM) {
default:
return createDefaultTargetCodeGenInfo(CGM);
- case llvm::Triple::le32:
- return createPNaClTargetCodeGenInfo(CGM);
case llvm::Triple::m68k:
return createM68kTargetCodeGenInfo(CGM);
case llvm::Triple::mips:
@@ -1218,8 +1216,12 @@ void CodeGenModule::Release() {
(LangOpts.PointerAuthVTPtrTypeDiscrimination
<< AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) |
(LangOpts.PointerAuthInitFini
- << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI);
- static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI ==
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI) |
+ (LangOpts.PointerAuthInitFiniAddressDiscrimination
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC) |
+ (LangOpts.PointerAuthELFGOT
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT);
+ static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT ==
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST,
"Update when new enum items are defined");
if (PAuthABIVersion != 0) {
@@ -2082,37 +2084,53 @@ void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
if (Fns.empty()) return;
- // Ctor function type is void()*.
- llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
- TheModule.getDataLayout().getProgramAddressSpace());
+ const PointerAuthSchema &InitFiniAuthSchema =
+ getCodeGenOpts().PointerAuth.InitFiniPointers;
- // Get the type of a ctor entry, { i32, void ()*, i8* }.
- llvm::StructType *CtorStructTy = llvm::StructType::get(
- Int32Ty, CtorPFTy, VoidPtrTy);
+ // Ctor function type is ptr.
+ llvm::PointerType *PtrTy = llvm::PointerType::get(
+ getLLVMContext(), TheModule.getDataLayout().getProgramAddressSpace());
+
+ // Get the type of a ctor entry, { i32, ptr, ptr }.
+ llvm::StructType *CtorStructTy = llvm::StructType::get(Int32Ty, PtrTy, PtrTy);
// Construct the constructor and destructor arrays.
- ConstantInitBuilder builder(*this);
- auto ctors = builder.beginArray(CtorStructTy);
+ ConstantInitBuilder Builder(*this);
+ auto Ctors = Builder.beginArray(CtorStructTy);
for (const auto &I : Fns) {
- auto ctor = ctors.beginStruct(CtorStructTy);
- ctor.addInt(Int32Ty, I.Priority);
- ctor.add(I.Initializer);
+ auto Ctor = Ctors.beginStruct(CtorStructTy);
+ Ctor.addInt(Int32Ty, I.Priority);
+ if (InitFiniAuthSchema) {
+ llvm::Constant *StorageAddress =
+ (InitFiniAuthSchema.isAddressDiscriminated()
+ ? llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(
+ IntPtrTy,
+ llvm::ConstantPtrAuth::AddrDiscriminator_CtorsDtors),
+ PtrTy)
+ : nullptr);
+ llvm::Constant *SignedCtorPtr = getConstantSignedPointer(
+ I.Initializer, InitFiniAuthSchema.getKey(), StorageAddress,
+ llvm::ConstantInt::get(
+ SizeTy, InitFiniAuthSchema.getConstantDiscrimination()));
+ Ctor.add(SignedCtorPtr);
+ } else {
+ Ctor.add(I.Initializer);
+ }
if (I.AssociatedData)
- ctor.add(I.AssociatedData);
+ Ctor.add(I.AssociatedData);
else
- ctor.addNullPointer(VoidPtrTy);
- ctor.finishAndAddTo(ctors);
+ Ctor.addNullPointer(PtrTy);
+ Ctor.finishAndAddTo(Ctors);
}
- auto list =
- ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
- /*constant*/ false,
- llvm::GlobalValue::AppendingLinkage);
+ auto List = Ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::AppendingLinkage);
// The LTO linker doesn't seem to like it when we set an alignment
// on appending variables. Take it off as a workaround.
- list->setAlignment(std::nullopt);
+ List->setAlignment(std::nullopt);
Fns.clear();
}
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index e5d0cc4..9b52277 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -582,13 +582,6 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
return new XLCXXABI(CGM);
case TargetCXXABI::GenericItanium:
- if (CGM.getContext().getTargetInfo().getTriple().getArch()
- == llvm::Triple::le32) {
- // For PNaCl, use ARM-style method pointers so that PNaCl code
- // does not assume anything about the alignment of function
- // pointers.
- return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
- }
return new ItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 20a555a..2d50c2c 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -1381,7 +1381,8 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
SanitizerKind::Nullability | SanitizerKind::LocalBounds;
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
- getTriple().getArch() == llvm::Triple::arm || getTriple().isWasm() ||
+ getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::thumb || getTriple().isWasm() ||
getTriple().isAArch64() || getTriple().isRISCV() ||
getTriple().isLoongArch64())
Res |= SanitizerKind::CFIICall;
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 55bedc4..2054c8f 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1847,14 +1847,17 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
Args.addOptInFlag(
CmdArgs, options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_init_fini,
- options::OPT_fno_ptrauth_init_fini);
Args.addOptInFlag(
CmdArgs, options::OPT_fptrauth_function_pointer_type_discrimination,
options::OPT_fno_ptrauth_function_pointer_type_discrimination);
Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_indirect_gotos,
options::OPT_fno_ptrauth_indirect_gotos);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini);
+ Args.addOptInFlag(CmdArgs,
+ options::OPT_fptrauth_init_fini_address_discrimination,
+ options::OPT_fno_ptrauth_init_fini_address_discrimination);
}
void Clang::AddLoongArchTargetArgs(const ArgList &Args,
@@ -3925,12 +3928,6 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
if (UseBuiltins)
A->render(Args, CmdArgs);
}
-
- // le32-specific flags:
- // -fno-math-builtin: clang should not convert math builtins to intrinsics
- // by default.
- if (TC.getArch() == llvm::Triple::le32)
- CmdArgs.push_back("-fno-math-builtin");
}
bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index 17b6074..ee6890d 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -2923,22 +2923,45 @@ bool Darwin::isAlignedAllocationUnavailable() const {
return TargetVersion < alignedAllocMinVersion(OS);
}
-static bool sdkSupportsBuiltinModules(const Darwin::DarwinPlatformKind &TargetPlatform, const std::optional<DarwinSDKInfo> &SDKInfo) {
+static bool sdkSupportsBuiltinModules(
+ const Darwin::DarwinPlatformKind &TargetPlatform,
+ const Darwin::DarwinEnvironmentKind &TargetEnvironment,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ if (TargetEnvironment == Darwin::NativeEnvironment ||
+ TargetEnvironment == Darwin::Simulator ||
+ TargetEnvironment == Darwin::MacCatalyst) {
+ // Standard xnu/Mach/Darwin based environments
+ // depend on the SDK version.
+ } else {
+ // All other environments support builtin modules from the start.
+ return true;
+ }
+
if (!SDKInfo)
+ // If there is no SDK info, assume this is building against a
+ // pre-SDK version of macOS (i.e. before Mac OS X 10.4). Those
+ // don't support modules anyway, but the headers definitely
+ // don't support builtin modules either. It might also be some
+ // kind of degenerate build environment, err on the side of
+ // the old behavior which is to not use builtin modules.
return false;
VersionTuple SDKVersion = SDKInfo->getVersion();
switch (TargetPlatform) {
+ // Existing SDKs added support for builtin modules in the fall
+ // 2024 major releases.
case Darwin::MacOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(15U);
case Darwin::IPhoneOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(18U);
case Darwin::TvOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(18U);
case Darwin::WatchOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(11U);
case Darwin::XROS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(2U);
+
+ // New SDKs support builtin modules from the start.
default:
return true;
}
@@ -3030,7 +3053,7 @@ void Darwin::addClangTargetOptions(
// i.e. when the builtin stdint.h is in the Darwin module too, the cycle
// goes away. Note that -fbuiltin-headers-in-system-modules does nothing
// to fix the same problem with C++ headers, and is generally fragile.
- if (!sdkSupportsBuiltinModules(TargetPlatform, SDKInfo))
+ if (!sdkSupportsBuiltinModules(TargetPlatform, TargetEnvironment, SDKInfo))
CC1Args.push_back("-fbuiltin-headers-in-system-modules");
if (!DriverArgs.hasArgNoClaim(options::OPT_fdefine_target_os_macros,
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index f6b6c44..225bd641 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -1503,6 +1503,12 @@ void CompilerInvocation::setDefaultPointerAuthOptions(
PointerAuthSchema(Key::ASIA, true, Discrimination::Decl);
Opts.CXXMemberFunctionPointers =
PointerAuthSchema(Key::ASIA, false, Discrimination::Type);
+
+ if (LangOpts.PointerAuthInitFini) {
+ Opts.InitFiniPointers = PointerAuthSchema(
+ Key::ASIA, LangOpts.PointerAuthInitFiniAddressDiscrimination,
+ Discrimination::Constant, InitFiniPointerConstantDiscriminator);
+ }
}
Opts.IndirectGotos = LangOpts.PointerAuthIndirectGotos;
}
@@ -3425,11 +3431,12 @@ static void GeneratePointerAuthArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fptrauth_vtable_pointer_type_discrimination);
if (Opts.PointerAuthTypeInfoVTPtrDiscrimination)
GenerateArg(Consumer, OPT_fptrauth_type_info_vtable_pointer_discrimination);
-
- if (Opts.PointerAuthInitFini)
- GenerateArg(Consumer, OPT_fptrauth_init_fini);
if (Opts.PointerAuthFunctionTypeDiscrimination)
GenerateArg(Consumer, OPT_fptrauth_function_pointer_type_discrimination);
+ if (Opts.PointerAuthInitFini)
+ GenerateArg(Consumer, OPT_fptrauth_init_fini);
+ if (Opts.PointerAuthInitFiniAddressDiscrimination)
+ GenerateArg(Consumer, OPT_fptrauth_init_fini_address_discrimination);
}
static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
@@ -3445,10 +3452,11 @@ static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
Args.hasArg(OPT_fptrauth_vtable_pointer_type_discrimination);
Opts.PointerAuthTypeInfoVTPtrDiscrimination =
Args.hasArg(OPT_fptrauth_type_info_vtable_pointer_discrimination);
-
- Opts.PointerAuthInitFini = Args.hasArg(OPT_fptrauth_init_fini);
Opts.PointerAuthFunctionTypeDiscrimination =
Args.hasArg(OPT_fptrauth_function_pointer_type_discrimination);
+ Opts.PointerAuthInitFini = Args.hasArg(OPT_fptrauth_init_fini);
+ Opts.PointerAuthInitFiniAddressDiscrimination =
+ Args.hasArg(OPT_fptrauth_init_fini_address_discrimination);
}
/// Check if input file kind and language standard are compatible.
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index f3d19e3..b61aeca 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -149,8 +149,10 @@ set(x86_files
amxintrin.h
avx10_2_512minmaxintrin.h
avx10_2_512niintrin.h
+ avx10_2_512satcvtintrin.h
avx10_2minmaxintrin.h
avx10_2niintrin.h
+ avx10_2satcvtintrin.h
avx2intrin.h
avx512bf16intrin.h
avx512bitalgintrin.h
diff --git a/clang/lib/Headers/avx10_2_512satcvtintrin.h b/clang/lib/Headers/avx10_2_512satcvtintrin.h
new file mode 100644
index 0000000..0dadadb
--- /dev/null
+++ b/clang/lib/Headers/avx10_2_512satcvtintrin.h
@@ -0,0 +1,301 @@
+/*===------ avx10_2_512satcvtintrin.h - AVX10_2_512SATCVT intrinsics -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avx10_2_512satcvtintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVX10_2_512SATCVTINTRIN_H
+#define __AVX10_2_512SATCVTINTRIN_H
+
+#define _mm512_ipcvtnebf16_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvtnebf162ibs512((__v32bf)(__m512bh)(A)))
+
+#define _mm512_mask_ipcvtnebf16_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvtnebf16_epi8(A), \
+ (__v32hi)(__m512i)(W)))
+
+#define _mm512_maskz_ipcvtnebf16_epi8(U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvtnebf16_epi8(A), \
+ (__v32hi)_mm512_setzero_si512()))
+
+#define _mm512_ipcvtnebf16_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvtnebf162iubs512((__v32bf)(__m512bh)(A)))
+
+#define _mm512_mask_ipcvtnebf16_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvtnebf16_epu8(A), \
+ (__v32hi)(__m512i)(W)))
+
+#define _mm512_maskz_ipcvtnebf16_epu8(U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvtnebf16_epu8(A), \
+ (__v32hi)_mm512_setzero_si512()))
+
+#define _mm512_ipcvttnebf16_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvttnebf162ibs512((__v32bf)(__m512bh)(A)))
+
+#define _mm512_mask_ipcvttnebf16_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvttnebf16_epi8(A), \
+ (__v32hi)(__m512i)(W)))
+
+#define _mm512_maskz_ipcvttnebf16_epi8(U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvttnebf16_epi8(A), \
+ (__v32hi)_mm512_setzero_si512()))
+
+#define _mm512_ipcvttnebf16_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvttnebf162iubs512((__v32bf)(__m512bh)(A)))
+
+#define _mm512_mask_ipcvttnebf16_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvttnebf16_epu8(A), \
+ (__v32hi)(__m512i)(W)))
+
+#define _mm512_maskz_ipcvttnebf16_epu8(U, A) \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_ipcvttnebf16_epu8(A), \
+ (__v32hi)_mm512_setzero_si512()))
+
+#define _mm512_ipcvtph_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvtph_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)(W), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvtph_epi8(U, A) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvt_roundph_epi8(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)_mm512_setzero_si512(), \
+ (__mmask32)-1, (const int)R))
+
+#define _mm512_mask_ipcvt_roundph_epi8(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
+
+#define _mm512_maskz_ipcvt_roundph_epi8(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)_mm512_setzero_si512(), \
+ (__mmask32)(U), (const int)R))
+
+#define _mm512_ipcvtph_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvtph_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)(W), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvtph_epu8(U, A) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvt_roundph_epu8(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ (const int)R))
+
+#define _mm512_mask_ipcvt_roundph_epu8(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
+
+#define _mm512_maskz_ipcvt_roundph_epu8(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ (const int)R))
+
+#define _mm512_ipcvtps_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvtps_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
+ (__v16su)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvtps_epi8(U, A) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvt_roundps_epi8(A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
+ (__v16su)_mm512_setzero_si512(), \
+ (__mmask16)-1, (const int)R))
+
+#define _mm512_mask_ipcvt_roundps_epi8(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
+
+#define _mm512_maskz_ipcvt_roundps_epi8(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
+ (__v16su)_mm512_setzero_si512(), \
+ (__mmask16)(U), (const int)R))
+
+#define _mm512_ipcvtps_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvtps_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask((__v16sf)(__m512)(A), \
+ (__v16su)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvtps_epu8(U, A) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvt_roundps_epu8(A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ (const int)R))
+
+#define _mm512_mask_ipcvt_roundps_epu8(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
+
+#define _mm512_maskz_ipcvt_roundps_epu8(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
+ (__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ (const int)R))
+
+#define _mm512_ipcvttph_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvttph_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)(W), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvttph_epi8(U, A) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvtt_roundph_epi8(A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ S))
+
+#define _mm512_mask_ipcvtt_roundph_epi8(W, U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
+
+#define _mm512_maskz_ipcvtt_roundph_epi8(U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ S))
+
+#define _mm512_ipcvttph_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvttph_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask((__v32hf)(__m512h)(A), \
+ (__v32hu)(W), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvttph_epu8(U, A) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvtt_roundph_epu8(A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
+ S))
+
+#define _mm512_mask_ipcvtt_roundph_epu8(W, U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
+
+#define _mm512_maskz_ipcvtt_roundph_epu8(U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
+ S))
+
+#define _mm512_ipcvttps_epi8(A) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvttps_epi8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask((__v16sf)(__m512h)(A), \
+ (__v16su)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvttps_epi8(U, A) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvtt_roundps_epi8(A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ S))
+
+#define _mm512_mask_ipcvtt_roundps_epi8(W, U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
+
+#define _mm512_maskz_ipcvtt_roundps_epi8(U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ S))
+
+#define _mm512_ipcvttps_epu8(A) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_ipcvttps_epu8(W, U, A) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask((__v16sf)(__m512h)(A), \
+ (__v16su)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_ipcvttps_epu8(U, A) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_ipcvtt_roundps_epu8(A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
+ S))
+
+#define _mm512_mask_ipcvtt_roundps_epu8(W, U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
+
+#define _mm512_maskz_ipcvtt_roundps_epu8(U, A, S) \
+ ((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
+ (__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
+ S))
+
+#endif // __AVX10_2_512SATCVTINTRIN_H
diff --git a/clang/lib/Headers/avx10_2satcvtintrin.h b/clang/lib/Headers/avx10_2satcvtintrin.h
new file mode 100644
index 0000000..dd5c44f
--- /dev/null
+++ b/clang/lib/Headers/avx10_2satcvtintrin.h
@@ -0,0 +1,444 @@
+/*===----------- avx10_2satcvtintrin.h - AVX10_2SATCVT intrinsics ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avx10_2satcvtintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVX10_2SATCVTINTRIN_H
+#define __AVX10_2SATCVTINTRIN_H
+
+#define _mm_ipcvtnebf16_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvtnebf162ibs128((__v8bf)(__m128bh)(A)))
+
+#define _mm_mask_ipcvtnebf16_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_ipcvtnebf16_epi8(A), (__v8hi)(__m128i)(W)))
+
+#define _mm_maskz_ipcvtnebf16_epi8(U, A) \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_ipcvtnebf16_epi8(A), \
+ (__v8hi)_mm_setzero_si128()))
+
+#define _mm256_ipcvtnebf16_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvtnebf162ibs256((__v16bf)(__m256bh)(A)))
+
+#define _mm256_mask_ipcvtnebf16_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvtnebf16_epi8(A), \
+ (__v16hi)(__m256i)(W)))
+
+#define _mm256_maskz_ipcvtnebf16_epi8(U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvtnebf16_epi8(A), \
+ (__v16hi)_mm256_setzero_si256()))
+
+#define _mm_ipcvtnebf16_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvtnebf162iubs128((__v8bf)(__m128bh)(A)))
+
+#define _mm_mask_ipcvtnebf16_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_ipcvtnebf16_epu8(A), (__v8hi)(__m128i)(W)))
+
+#define _mm_maskz_ipcvtnebf16_epu8(U, A) \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_ipcvtnebf16_epu8(A), \
+ (__v8hi)_mm_setzero_si128()))
+
+#define _mm256_ipcvtnebf16_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvtnebf162iubs256((__v16bf)(__m256bh)(A)))
+
+#define _mm256_mask_ipcvtnebf16_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvtnebf16_epu8(A), \
+ (__v16hi)(__m256i)(W)))
+
+#define _mm256_maskz_ipcvtnebf16_epu8(U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvtnebf16_epu8(A), \
+ (__v16hi)_mm256_setzero_si256()))
+
+#define _mm_ipcvtph_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvtph_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvtph2ibs128_mask((__v8hf)(__m128h)(A), \
+ (__v8hu)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvtph_epi8(U, A) \
+ ((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvtph_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvtph_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvtph_epi8(U, A) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
+ (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvt_roundph_epi8(A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)_mm256_setzero_si256(), \
+ (__mmask16)-1, (const int)R))
+
+#define _mm256_mask_ipcvt_roundph_epi8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
+
+#define _mm256_maskz_ipcvt_roundph_epi8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)_mm256_setzero_si256(), \
+ (__mmask16)(U), (const int)R))
+
+#define _mm_ipcvtph_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvtph_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvtph2iubs128_mask((__v8hf)(__m128h)(A), \
+ (__v8hu)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvtph_epu8(U, A) \
+ ((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvtph_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvtph_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvtph_epu8(U, A) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
+ (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvt_roundph_epu8(A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ (const int)R))
+
+#define _mm256_mask_ipcvt_roundph_epu8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
+
+#define _mm256_maskz_ipcvt_roundph_epu8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
+ (const int)R))
+
+#define _mm_ipcvtps_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvtps_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvtps2ibs128_mask((__v4sf)(__m128)(A), \
+ (__v4su)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvtps_epi8(U, A) \
+ ((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvtps_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvtps_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvtps_epi8(U, A) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvt_roundps_epi8(A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)-1, (const int)R))
+
+#define _mm256_mask_ipcvt_roundps_epi8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
+
+#define _mm256_maskz_ipcvt_roundps_epi8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)(U), (const int)R))
+
+#define _mm_ipcvtps_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvtps_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvtps2iubs128_mask((__v4sf)(__m128)(A), \
+ (__v4su)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvtps_epu8(U, A) \
+ ((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvtps_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvtps_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvtps_epu8(U, A) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvt_roundps_epu8(A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)-1, (const int)R))
+
+#define _mm256_mask_ipcvt_roundps_epu8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
+
+#define _mm256_maskz_ipcvt_roundps_epu8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)(U), (const int)R))
+
+#define _mm_ipcvttnebf16_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvttnebf162ibs128((__v8bf)(__m128bh)(A)))
+
+#define _mm_mask_ipcvttnebf16_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_ipcvttnebf16_epi8(A), (__v8hi)(__m128i)(W)))
+
+#define _mm_maskz_ipcvttnebf16_epi8(U, A) \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_ipcvttnebf16_epi8(A), \
+ (__v8hi)_mm_setzero_si128()))
+
+#define _mm256_ipcvttnebf16_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvttnebf162ibs256((__v16bf)(__m256bh)(A)))
+
+#define _mm256_mask_ipcvttnebf16_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvttnebf16_epi8(A), \
+ (__v16hi)(__m256i)(W)))
+
+#define _mm256_maskz_ipcvttnebf16_epi8(U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvttnebf16_epi8(A), \
+ (__v16hi)_mm256_setzero_si256()))
+
+#define _mm_ipcvttnebf16_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvttnebf162iubs128((__v8bf)(__m128bh)(A)))
+
+#define _mm_mask_ipcvttnebf16_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_ipcvttnebf16_epu8(A), (__v8hi)(__m128i)(W)))
+
+#define _mm_maskz_ipcvttnebf16_epu8(U, A) \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_ipcvttnebf16_epu8(A), \
+ (__v8hi)_mm_setzero_si128()))
+
+#define _mm256_ipcvttnebf16_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvttnebf162iubs256((__v16bf)(__m256bh)(A)))
+
+#define _mm256_mask_ipcvttnebf16_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvttnebf16_epu8(A), \
+ (__v16hi)(__m256i)(W)))
+
+#define _mm256_maskz_ipcvttnebf16_epu8(U, A) \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_ipcvttnebf16_epu8(A), \
+ (__v16hi)_mm256_setzero_si256()))
+
+#define _mm_ipcvttph_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvttph_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvttph2ibs128_mask((__v8hf)(__m128h)(A), \
+ (__v8hu)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvttph_epi8(U, A) \
+ ((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvttph_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvttph_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvttph_epi8(U, A) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
+ (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvtt_roundph_epi8(A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ (const int)R))
+
+#define _mm256_mask_ipcvtt_roundph_epi8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
+
+#define _mm256_maskz_ipcvtt_roundph_epi8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
+ (const int)R))
+
+#define _mm_ipcvttph_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvttph_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvttph2iubs128_mask((__v8hf)(__m128h)(A), \
+ (__v8hu)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvttph_epu8(U, A) \
+ ((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
+ (__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvttph_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvttph_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask((__v16hf)(__m256h)(A), \
+ (__v16hu)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvttph_epu8(U, A) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
+ (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvtt_roundph_epu8(A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
+ (const int)R))
+
+#define _mm256_mask_ipcvtt_roundph_epu8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
+
+#define _mm256_maskz_ipcvtt_roundph_epu8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
+ (__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
+ (const int)R))
+
+#define _mm_ipcvttps_epi8(A) \
+ ((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvttps_epi8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvttps2ibs128_mask((__v4sf)(__m128)(A), \
+ (__v4su)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvttps_epi8(U, A) \
+ ((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvttps_epi8(A) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvttps_epi8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvttps_epi8(U, A) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvtt_roundps_epi8(A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)-1, (const int)R))
+
+#define _mm256_mask_ipcvtt_roundps_epi8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
+
+#define _mm256_maskz_ipcvtt_roundps_epi8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)_mm256_setzero_si256(), \
+ (__mmask8)(U), (const int)R))
+
+#define _mm_ipcvttps_epu8(A) \
+ ((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_ipcvttps_epu8(W, U, A) \
+ ((__m128i)__builtin_ia32_vcvttps2iubs128_mask((__v4sf)(__m128)(A), \
+ (__v4su)(W), (__mmask8)(U)))
+
+#define _mm_maskz_ipcvttps_epu8(U, A) \
+ ((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
+ (__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
+
+#define _mm256_ipcvttps_epu8(A) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_mask_ipcvttps_epu8(W, U, A) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask((__v8sf)(__m256)(A), \
+ (__v8su)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_maskz_ipcvttps_epu8(U, A) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm256_ipcvtt_roundps_epu8(A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
+ (const int)R))
+
+#define _mm256_mask_ipcvtt_roundps_epu8(W, U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
+
+#define _mm256_maskz_ipcvtt_roundps_epu8(U, A, R) \
+ ((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
+ (__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)(U), \
+ (const int)R))
+#endif // __AVX10_2SATCVTINTRIN_H
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index 6d46bde..f570c57 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -651,11 +651,13 @@ _storebe_i64(void * __P, long long __D) {
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2__)
#include <avx10_2minmaxintrin.h>
#include <avx10_2niintrin.h>
+#include <avx10_2satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2_512__)
#include <avx10_2_512minmaxintrin.h>
#include <avx10_2_512niintrin.h>
+#include <avx10_2_512satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__)
diff --git a/clang/lib/Headers/ptrauth.h b/clang/lib/Headers/ptrauth.h
index 4724155..6cbdccf 100644
--- a/clang/lib/Headers/ptrauth.h
+++ b/clang/lib/Headers/ptrauth.h
@@ -36,6 +36,9 @@ typedef enum {
The extra data is always 0. */
ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data,
+ /* The key used to sign pointers in ELF .init_array/.fini_array. */
+ ptrauth_key_init_fini_pointer = ptrauth_key_process_independent_code,
+
/* Other pointers signed under the ABI use private ABI rules. */
} ptrauth_key;
@@ -247,6 +250,9 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
[[clang::ptrauth_vtable_pointer(key, address_discrimination, \
extra_discrimination)]]
+/* The value is ptrauth_string_discriminator("init_fini") */
+#define __ptrauth_init_fini_discriminator 0xd9d4
+
#else
#define ptrauth_strip(__value, __key) \
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 6c5f4ac..a8a9d3f 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -314,27 +314,24 @@ void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
}
/// Determine whether the given attribute has an identifier argument.
-static bool attributeHasIdentifierArg(const IdentifierInfo &II,
+static bool attributeHasIdentifierArg(const llvm::Triple &T,
+ const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_IDENTIFIER_ARG_LIST
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_IDENTIFIER_ARG_LIST
}
-/// Determine whether the given attribute has an identifier argument.
+/// Determine whether the given attribute has string arguments.
static ParsedAttributeArgumentsProperties
attributeStringLiteralListArg(const llvm::Triple &T, const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_STRING_LITERAL_ARG_LIST
- return llvm::StringSwitch<uint32_t>(FullName)
+ return llvm::StringSwitch<uint32_t>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(0);
#undef CLANG_ATTR_STRING_LITERAL_ARG_LIST
@@ -344,10 +341,8 @@ attributeStringLiteralListArg(const llvm::Triple &T, const IdentifierInfo &II,
static bool attributeHasVariadicIdentifierArg(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
@@ -357,10 +352,8 @@ static bool attributeHasVariadicIdentifierArg(const IdentifierInfo &II,
static bool attributeTreatsKeywordThisAsIdentifier(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
@@ -370,10 +363,8 @@ static bool attributeTreatsKeywordThisAsIdentifier(const IdentifierInfo &II,
static bool attributeAcceptsExprPack(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_ACCEPTS_EXPR_PACK
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_ACCEPTS_EXPR_PACK
@@ -383,42 +374,22 @@ static bool attributeAcceptsExprPack(const IdentifierInfo &II,
static bool attributeIsTypeArgAttr(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_TYPE_ARG_LIST
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_TYPE_ARG_LIST
}
-/// Determine whether the given attribute takes identifier arguments.
+/// Determine whether the given attribute takes a strict identifier argument.
static bool attributeHasStrictIdentifierArgs(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
-#define CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
- return (llvm::StringSwitch<uint64_t>(FullName)
-#include "clang/Parse/AttrParserStringSwitches.inc"
- .Default(0)) != 0;
-#undef CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
-}
-
-/// Determine whether the given attribute takes an identifier argument at a
-/// specific index
-static bool attributeHasStrictIdentifierArgAtIndex(const IdentifierInfo &II,
- ParsedAttr::Syntax Syntax,
- IdentifierInfo *ScopeName,
- size_t argIndex) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
-#define CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
- return (llvm::StringSwitch<uint64_t>(FullName)
+#define CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
- .Default(0)) &
- (1ull << argIndex);
-#undef CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+ .Default(false);
+#undef CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST
}
/// Determine whether the given attribute requires parsing its arguments
@@ -426,10 +397,8 @@ static bool attributeHasStrictIdentifierArgAtIndex(const IdentifierInfo &II,
static bool attributeParsedArgsUnevaluated(const IdentifierInfo &II,
ParsedAttr::Syntax Syntax,
IdentifierInfo *ScopeName) {
- std::string FullName =
- AttributeCommonInfo::normalizeFullNameWithSyntax(&II, ScopeName, Syntax);
#define CLANG_ATTR_ARG_CONTEXT_LIST
- return llvm::StringSwitch<bool>(FullName)
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
.Default(false);
#undef CLANG_ATTR_ARG_CONTEXT_LIST
@@ -575,7 +544,8 @@ unsigned Parser::ParseAttributeArgsCommon(
// If this attribute wants an 'identifier' argument, make it so.
bool IsIdentifierArg =
AttributeHasVariadicIdentifierArg ||
- attributeHasIdentifierArg(*AttrName, Form.getSyntax(), ScopeName);
+ attributeHasIdentifierArg(getTargetInfo().getTriple(), *AttrName,
+ Form.getSyntax(), ScopeName);
ParsedAttr::Kind AttrKind =
ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
@@ -619,13 +589,6 @@ unsigned Parser::ParseAttributeArgsCommon(
if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
Tok.setKind(tok::identifier);
- if (Tok.is(tok::identifier) &&
- attributeHasStrictIdentifierArgAtIndex(
- *AttrName, Form.getSyntax(), ScopeName, ArgExprs.size())) {
- ArgExprs.push_back(ParseIdentifierLoc());
- continue;
- }
-
ExprResult ArgExpr;
if (Tok.is(tok::identifier)) {
ArgExprs.push_back(ParseIdentifierLoc());
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index d4dc06d..5732ee7 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -3175,7 +3175,6 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_simdlen:
case OMPC_collapse:
case OMPC_ordered:
- case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
case OMPC_grainsize:
@@ -3332,6 +3331,13 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
? ParseOpenMPSimpleClause(CKind, WrongDirective)
: ParseOpenMPClause(CKind, WrongDirective);
break;
+ case OMPC_num_teams:
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+ [[fallthrough]];
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
diff --git a/clang/lib/Sema/SemaBoundsSafety.cpp b/clang/lib/Sema/SemaBoundsSafety.cpp
index 290c820..d63a238 100644
--- a/clang/lib/Sema/SemaBoundsSafety.cpp
+++ b/clang/lib/Sema/SemaBoundsSafety.cpp
@@ -48,10 +48,8 @@ enum class CountedByInvalidPointeeTypeKind {
VALID,
};
-bool Sema::CheckCountedByAttrOnField(
- FieldDecl *FD, Expr *E,
- llvm::SmallVectorImpl<TypeCoupledDeclRefInfo> &Decls, bool CountInBytes,
- bool OrNull) {
+bool Sema::CheckCountedByAttrOnField(FieldDecl *FD, Expr *E, bool CountInBytes,
+ bool OrNull) {
// Check the context the attribute is used in
unsigned Kind = getCountAttrKind(CountInBytes, OrNull);
@@ -185,8 +183,6 @@ bool Sema::CheckCountedByAttrOnField(
return true;
}
}
-
- Decls.push_back(TypeCoupledDeclRefInfo(CountFD, /*IsDref*/ false));
return false;
}
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index 68ad6e3..1bb8955 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -838,7 +838,8 @@ ExprResult Sema::BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc) {
Expr *CoawaitOp = UnresolvedLookupExpr::Create(
Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, Functions.begin(),
- Functions.end(), /*KnownDependent=*/false);
+ Functions.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
assert(CoawaitOp);
return CoawaitOp;
}
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 1f4bfa2..1da16d6 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -1219,7 +1219,7 @@ Corrected:
return NameClassification::OverloadSet(UnresolvedLookupExpr::Create(
Context, Result.getNamingClass(), SS.getWithLocInContext(Context),
Result.getLookupNameInfo(), ADL, Result.begin(), Result.end(),
- /*KnownDependent=*/false));
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
}
ExprResult
@@ -7964,8 +7964,9 @@ NamedDecl *Sema::ActOnVariableDeclarator(
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
} else {
// If this is an explicit specialization of a static data member, check it.
- if (IsMemberSpecialization && !IsVariableTemplateSpecialization &&
- !NewVD->isInvalidDecl() && CheckMemberSpecialization(NewVD, Previous))
+ if (IsMemberSpecialization && !IsVariableTemplate &&
+ !IsVariableTemplateSpecialization && !NewVD->isInvalidDecl() &&
+ CheckMemberSpecialization(NewVD, Previous))
NewVD->setInvalidDecl();
// Merge the decl with the existing one if appropriate.
@@ -10466,7 +10467,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Previous))
NewFD->setInvalidDecl();
}
- } else if (isMemberSpecialization && isa<CXXMethodDecl>(NewFD)) {
+ } else if (isMemberSpecialization && !FunctionTemplate) {
if (CheckMemberSpecialization(NewFD, Previous))
NewFD->setInvalidDecl();
}
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 9011fa5..bcb1424 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -5898,8 +5898,7 @@ static void handleCountedByAttrField(Sema &S, Decl *D, const ParsedAttr &AL) {
llvm_unreachable("unexpected counted_by family attribute");
}
- llvm::SmallVector<TypeCoupledDeclRefInfo, 1> Decls;
- if (S.CheckCountedByAttrOnField(FD, CountExpr, Decls, CountInBytes, OrNull))
+ if (S.CheckCountedByAttrOnField(FD, CountExpr, CountInBytes, OrNull))
return;
QualType CAT = S.BuildCountAttributedArrayOrPointerType(
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 5782daa..cb0a589 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -1289,7 +1289,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
S.Context, nullptr, NestedNameSpecifierLoc(), SourceLocation(),
DeclarationNameInfo(GetDN, Loc), /*RequiresADL=*/true, &Args,
UnresolvedSetIterator(), UnresolvedSetIterator(),
- /*KnownDependent=*/false);
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
Expr *Arg = E.get();
E = S.BuildCallExpr(nullptr, Get, Loc, Arg, Loc);
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index b1b0d1c..21c8ae6 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -3188,7 +3188,7 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(
Context, R.getNamingClass(), SS.getWithLocInContext(Context),
R.getLookupNameInfo(), NeedsADL, R.begin(), R.end(),
- /*KnownDependent=*/false);
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
return ULE;
}
@@ -3652,7 +3652,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Fast path for a single digit (which is quite common). A single digit
// cannot have a trigraph, escaped newline, radix prefix, or suffix.
if (Tok.getLength() == 1 || Tok.getKind() == tok::binary_data) {
- const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
+ const uint8_t Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
return ActOnIntegerConstant(Tok.getLocation(), Val);
}
diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp
index 2070f3b..f1ba26f 100644
--- a/clang/lib/Sema/SemaExprMember.cpp
+++ b/clang/lib/Sema/SemaExprMember.cpp
@@ -331,7 +331,8 @@ ExprResult Sema::BuildPossibleImplicitMemberExpr(
return UnresolvedLookupExpr::Create(
Context, R.getNamingClass(), SS.getWithLocInContext(Context),
TemplateKWLoc, R.getLookupNameInfo(), /*RequiresADL=*/false,
- TemplateArgs, R.begin(), R.end(), /*KnownDependent=*/true);
+ TemplateArgs, R.begin(), R.end(), /*KnownDependent=*/true,
+ /*KnownInstantiationDependent=*/true);
case IMA_Error_StaticOrExplicitContext:
case IMA_Error_Unrelated:
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index 601077e..b697918 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -1021,6 +1021,8 @@ void Sema::CompleteLambdaCallOperator(
getGenericLambdaTemplateParameterList(LSI, *this);
DeclContext *DC = Method->getLexicalDeclContext();
+ // DeclContext::addDecl() assumes that the DeclContext we're adding to is the
+ // lexical context of the Method. Do so.
Method->setLexicalDeclContext(LSI->Lambda);
if (TemplateParams) {
FunctionTemplateDecl *TemplateMethod =
@@ -1105,6 +1107,8 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
CXXMethodDecl *Method = CreateLambdaCallOperator(Intro.Range, Class);
LSI->CallOperator = Method;
+ // Temporarily set the lexical declaration context to the current
+ // context, so that the Scope stack matches the lexical nesting.
Method->setLexicalDeclContext(CurContext);
PushDeclContext(CurScope, Method);
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 9b60afd..b5978dd 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -13034,6 +13034,25 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetUpdateDirective(
Clauses, AStmt);
}
+/// This checks whether a \p ClauseType clause \p C has at most \p Max
+/// expression. If not, a diag of number \p Diag will be emitted.
+template <typename ClauseType>
+static bool checkNumExprsInClause(SemaBase &SemaRef,
+ ArrayRef<OMPClause *> Clauses,
+ unsigned MaxNum, unsigned Diag) {
+ auto ClauseItr = llvm::find_if(Clauses, llvm::IsaPred<ClauseType>);
+ if (ClauseItr == Clauses.end())
+ return true;
+ const auto *C = cast<ClauseType>(*ClauseItr);
+ auto VarList = C->getVarRefs();
+ if (VarList.size() > MaxNum) {
+ SemaRef.Diag(VarList[MaxNum]->getBeginLoc(), Diag)
+ << getOpenMPClauseName(C->getClauseKind());
+ return false;
+ }
+ return true;
+}
+
StmtResult SemaOpenMP::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -13041,6 +13060,10 @@ StmtResult SemaOpenMP::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(
+ *this, Clauses, /*MaxNum=*/1, diag::err_omp_multi_expr_not_allowed))
+ return StmtError();
+
// Report affected OpenMP target offloading behavior when in HIP lang-mode.
if (getLangOpts().HIP && (DSAStack->getParentDirective() == OMPD_target))
Diag(StartLoc, diag::warn_hip_omp_target_directives);
@@ -13815,6 +13838,14 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDirective(
return StmtError();
}
+ unsigned ClauseMaxNumExprs = HasBareClause ? 3 : 1;
+ unsigned DiagNo = HasBareClause
+ ? diag::err_ompx_more_than_three_expr_not_allowed
+ : diag::err_omp_multi_expr_not_allowed;
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(*this, Clauses,
+ ClauseMaxNumExprs, DiagNo))
+ return StmtError();
+
return OMPTargetTeamsDirective::Create(getASTContext(), StartLoc, EndLoc,
Clauses, AStmt);
}
@@ -13825,6 +13856,10 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeDirective(
if (!AStmt)
return StmtError();
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(
+ *this, Clauses, /*MaxNum=*/1, diag::err_omp_multi_expr_not_allowed))
+ return StmtError();
+
CapturedStmt *CS =
setBranchProtectedScope(SemaRef, OMPD_target_teams_distribute, AStmt);
@@ -13851,6 +13886,10 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
if (!AStmt)
return StmtError();
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(
+ *this, Clauses, /*MaxNum=*/1, diag::err_omp_multi_expr_not_allowed))
+ return StmtError();
+
CapturedStmt *CS = setBranchProtectedScope(
SemaRef, OMPD_target_teams_distribute_parallel_for, AStmt);
@@ -13878,6 +13917,10 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
if (!AStmt)
return StmtError();
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(
+ *this, Clauses, /*MaxNum=*/1, diag::err_omp_multi_expr_not_allowed))
+ return StmtError();
+
CapturedStmt *CS = setBranchProtectedScope(
SemaRef, OMPD_target_teams_distribute_parallel_for_simd, AStmt);
@@ -13908,6 +13951,10 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective(
if (!AStmt)
return StmtError();
+ if (!checkNumExprsInClause<OMPNumTeamsClause>(
+ *this, Clauses, /*MaxNum=*/1, diag::err_omp_multi_expr_not_allowed))
+ return StmtError();
+
CapturedStmt *CS = setBranchProtectedScope(
SemaRef, OMPD_target_teams_distribute_simd, AStmt);
@@ -14955,9 +15002,6 @@ OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
case OMPC_ordered:
Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Expr);
break;
- case OMPC_num_teams:
- Res = ActOnOpenMPNumTeamsClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
case OMPC_thread_limit:
Res = ActOnOpenMPThreadLimitClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -15064,6 +15108,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
case OMPC_affinity:
case OMPC_when:
case OMPC_bind:
+ case OMPC_num_teams:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -16927,6 +16972,9 @@ OMPClause *SemaOpenMP::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
static_cast<OpenMPDoacrossClauseModifier>(ExtraModifier),
ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_num_teams:
+ Res = ActOnOpenMPNumTeamsClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_depobj:
case OMPC_final:
@@ -16957,7 +17005,6 @@ OMPClause *SemaOpenMP::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
case OMPC_device:
case OMPC_threads:
case OMPC_simd:
- case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
case OMPC_grainsize:
@@ -17885,7 +17932,8 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
return UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), ReductionId,
- /*ADL=*/true, ResSet.begin(), ResSet.end(), /*KnownDependent=*/false);
+ /*ADL=*/true, ResSet.begin(), ResSet.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
}
// Lookup inside the classes.
// C++ [over.match.oper]p3:
@@ -20751,7 +20799,8 @@ static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
return UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
MapperIdScopeSpec.getWithLocInContext(SemaRef.Context), MapperId,
- /*ADL=*/false, URS.begin(), URS.end(), /*KnownDependent=*/false);
+ /*ADL=*/false, URS.begin(), URS.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
}
SourceLocation Loc = MapperId.getLoc();
// [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
@@ -21834,32 +21883,40 @@ const ValueDecl *SemaOpenMP::getOpenMPDeclareMapperVarName() const {
return cast<DeclRefExpr>(DSAStack->getDeclareMapperVarRef())->getDecl();
}
-OMPClause *SemaOpenMP::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+OMPClause *SemaOpenMP::ActOnOpenMPNumTeamsClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- Expr *ValExpr = NumTeams;
- Stmt *HelperValStmt = nullptr;
-
- // OpenMP [teams Constrcut, Restrictions]
- // The num_teams expression must evaluate to a positive integer value.
- if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_teams,
- /*StrictlyPositive=*/true))
+ if (VarList.empty())
return nullptr;
+ for (Expr *ValExpr : VarList) {
+ // OpenMP [teams Constrcut, Restrictions]
+ // The num_teams expression must evaluate to a positive integer value.
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_teams,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+ }
+
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
DKind, OMPC_num_teams, getLangOpts().OpenMP);
- if (CaptureRegion != OMPD_unknown &&
- !SemaRef.CurContext->isDependentContext()) {
+ if (CaptureRegion == OMPD_unknown || SemaRef.CurContext->isDependentContext())
+ return OMPNumTeamsClause::Create(getASTContext(), CaptureRegion, StartLoc,
+ LParenLoc, EndLoc, VarList,
+ /*PreInit=*/nullptr);
+
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ SmallVector<Expr *, 3> Vars;
+ for (Expr *ValExpr : VarList) {
ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
- llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(getASTContext(), Captures);
+ Vars.push_back(ValExpr);
}
- return new (getASTContext()) OMPNumTeamsClause(
- ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
+ Stmt *PreInit = buildPreInits(getASTContext(), Captures);
+ return OMPNumTeamsClause::Create(getASTContext(), CaptureRegion, StartLoc,
+ LParenLoc, EndLoc, Vars, PreInit);
}
OMPClause *SemaOpenMP::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 54a9bba..fd88b6a 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -14093,9 +14093,9 @@ ExprResult Sema::CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL) {
- return UnresolvedLookupExpr::Create(Context, NamingClass, NNSLoc, DNI,
- PerformADL, Fns.begin(), Fns.end(),
- /*KnownDependent=*/false);
+ return UnresolvedLookupExpr::Create(
+ Context, NamingClass, NNSLoc, DNI, PerformADL, Fns.begin(), Fns.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
}
ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
diff --git a/clang/lib/Sema/SemaPPC.cpp b/clang/lib/Sema/SemaPPC.cpp
index 5b764ed..e0a978e 100644
--- a/clang/lib/Sema/SemaPPC.cpp
+++ b/clang/lib/Sema/SemaPPC.cpp
@@ -61,6 +61,9 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
case PPC::BI__builtin_bpermd:
case PPC::BI__builtin_pdepd:
case PPC::BI__builtin_pextd:
+ case PPC::BI__builtin_ppc_cdtbcd:
+ case PPC::BI__builtin_ppc_cbcdtd:
+ case PPC::BI__builtin_ppc_addg6s:
case PPC::BI__builtin_ppc_ldarx:
case PPC::BI__builtin_ppc_stdcx:
case PPC::BI__builtin_ppc_tdw:
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index f1c7c05..abf8e4a 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -222,6 +222,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
{"zvksh", RVV_REQ_Zvksh},
{"zvfbfwma", RVV_REQ_Zvfbfwma},
{"zvfbfmin", RVV_REQ_Zvfbfmin},
+ {"zvfh", RVV_REQ_Zvfh},
{"experimental", RVV_REQ_Experimental}};
// Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
@@ -280,6 +281,11 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
continue;
+ // TODO: Remove the check below and use RequiredFeatures in
+ // riscv_vector.td to check the intrinsics instead, the type check should
+ // be done in checkRVVTypeSupport. This check also not able to work on the
+ // intrinsics that have Float16 but the BaseType is not Float16 such as
+ // `vfcvt_f_x_v`.
if (BaseType == BasicType::Float16) {
if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
if (!TI.hasFeature("zvfhmin"))
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 1346a4a..cd3ee31 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -4436,7 +4436,8 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(
Context, R.getNamingClass(), SS.getWithLocInContext(Context),
TemplateKWLoc, R.getLookupNameInfo(), RequiresADL, TemplateArgs,
- R.begin(), R.end(), KnownDependent);
+ R.begin(), R.end(), KnownDependent,
+ /*KnownInstantiationDependent=*/false);
// Model the templates with UnresolvedTemplateTy. The expression should then
// either be transformed in an instantiation or be diagnosed in
@@ -9051,7 +9052,8 @@ bool Sema::CheckFunctionTemplateSpecialization(
bool
Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
- assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
+ assert(!Member->isTemplateDecl() && !Member->getDescribedTemplate() &&
+ "Only for non-template members");
// Try to find the member we are instantiating.
NamedDecl *FoundInstantiation = nullptr;
@@ -9062,21 +9064,25 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
if (Previous.empty()) {
// Nowhere to look anyway.
} else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) {
- SmallVector<FunctionDecl *> Candidates;
- bool Ambiguous = false;
- for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
- I != E; ++I) {
- CXXMethodDecl *Method =
- dyn_cast<CXXMethodDecl>((*I)->getUnderlyingDecl());
+ UnresolvedSet<8> Candidates;
+ for (NamedDecl *Candidate : Previous) {
+ auto *Method = dyn_cast<CXXMethodDecl>(Candidate->getUnderlyingDecl());
+ // Ignore any candidates that aren't member functions.
if (!Method)
continue;
+
QualType Adjusted = Function->getType();
if (!hasExplicitCallingConv(Adjusted))
Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+ // Ignore any candidates with the wrong type.
// This doesn't handle deduced return types, but both function
// declarations should be undeduced at this point.
+ // FIXME: The exception specification should probably be ignored when
+ // comparing the types.
if (!Context.hasSameType(Adjusted, Method->getType()))
continue;
+
+ // Ignore any candidates with unsatisfied constraints.
if (ConstraintSatisfaction Satisfaction;
Method->getTrailingRequiresClause() &&
(CheckFunctionConstraints(Method, Satisfaction,
@@ -9084,29 +9090,53 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
/*ForOverloadResolution=*/true) ||
!Satisfaction.IsSatisfied))
continue;
- Candidates.push_back(Method);
- FunctionDecl *MoreConstrained =
- Instantiation ? getMoreConstrainedFunction(
- Method, cast<FunctionDecl>(Instantiation))
- : Method;
- if (!MoreConstrained) {
- Ambiguous = true;
- continue;
+
+ Candidates.addDecl(Candidate);
+ }
+
+ // If we have no viable candidates left after filtering, we are done.
+ if (Candidates.empty())
+ return false;
+
+ // Find the function that is more constrained than every other function it
+ // has been compared to.
+ UnresolvedSetIterator Best = Candidates.begin();
+ CXXMethodDecl *BestMethod = nullptr;
+ for (UnresolvedSetIterator I = Candidates.begin(), E = Candidates.end();
+ I != E; ++I) {
+ auto *Method = cast<CXXMethodDecl>(I->getUnderlyingDecl());
+ if (I == Best ||
+ getMoreConstrainedFunction(Method, BestMethod) == Method) {
+ Best = I;
+ BestMethod = Method;
}
- if (MoreConstrained == Method) {
- Ambiguous = false;
- FoundInstantiation = *I;
- Instantiation = Method;
- InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
- MSInfo = Method->getMemberSpecializationInfo();
+ }
+
+ FoundInstantiation = *Best;
+ Instantiation = BestMethod;
+ InstantiatedFrom = BestMethod->getInstantiatedFromMemberFunction();
+ MSInfo = BestMethod->getMemberSpecializationInfo();
+
+ // Make sure the best candidate is more constrained than all of the others.
+ bool Ambiguous = false;
+ for (UnresolvedSetIterator I = Candidates.begin(), E = Candidates.end();
+ I != E; ++I) {
+ auto *Method = cast<CXXMethodDecl>(I->getUnderlyingDecl());
+ if (I != Best &&
+ getMoreConstrainedFunction(Method, BestMethod) != BestMethod) {
+ Ambiguous = true;
+ break;
}
}
+
if (Ambiguous) {
Diag(Member->getLocation(), diag::err_function_member_spec_ambiguous)
<< Member << (InstantiatedFrom ? InstantiatedFrom : Instantiation);
- for (FunctionDecl *Candidate : Candidates)
+ for (NamedDecl *Candidate : Candidates) {
+ Candidate = Candidate->getUnderlyingDecl();
Diag(Candidate->getLocation(), diag::note_function_member_spec_matched)
<< Candidate;
+ }
return true;
}
} else if (isa<VarDecl>(Member)) {
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index de47073..9a6cd2c 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/TimeProfiler.h"
#include <optional>
@@ -1657,11 +1658,12 @@ namespace {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
Sema::ConstraintEvalRAII<TemplateInstantiator> RAII(*this);
- ExprResult Result = inherited::TransformLambdaExpr(E);
- if (Result.isInvalid())
- return Result;
+ return inherited::TransformLambdaExpr(E);
+ }
- CXXMethodDecl *MD = Result.getAs<LambdaExpr>()->getCallOperator();
+ ExprResult RebuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
+ LambdaScopeInfo *LSI) {
+ CXXMethodDecl *MD = LSI->CallOperator;
for (ParmVarDecl *PVD : MD->parameters()) {
assert(PVD && "null in a parameter list");
if (!PVD->hasDefaultArg())
@@ -1680,8 +1682,7 @@ namespace {
PVD->setDefaultArg(ErrorResult.get());
}
}
-
- return Result;
+ return inherited::RebuildLambdaExpr(StartLoc, EndLoc, LSI);
}
StmtResult TransformLambdaBody(LambdaExpr *E, Stmt *Body) {
@@ -1694,11 +1695,8 @@ namespace {
// `true` to temporarily fix this issue.
// FIXME: This temporary fix can be removed after fully implementing
// p0588r1.
- bool Prev = EvaluateConstraints;
- EvaluateConstraints = true;
- StmtResult Stmt = inherited::TransformLambdaBody(E, Body);
- EvaluateConstraints = Prev;
- return Stmt;
+ llvm::SaveAndRestore _(EvaluateConstraints, true);
+ return inherited::TransformLambdaBody(E, Body);
}
ExprResult TransformRequiresExpr(RequiresExpr *E) {
diff --git a/clang/lib/Sema/SemaX86.cpp b/clang/lib/Sema/SemaX86.cpp
index f36b5ea..a0756f1 100644
--- a/clang/lib/Sema/SemaX86.cpp
+++ b/clang/lib/Sema/SemaX86.cpp
@@ -88,6 +88,14 @@ bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vgetexppd256_round_mask:
case X86::BI__builtin_ia32_vgetexpps256_round_mask:
case X86::BI__builtin_ia32_vgetexpph256_round_mask:
+ case X86::BI__builtin_ia32_vcvttph2ibs256_mask:
+ case X86::BI__builtin_ia32_vcvttph2iubs256_mask:
+ case X86::BI__builtin_ia32_vcvttps2ibs256_mask:
+ case X86::BI__builtin_ia32_vcvttps2iubs256_mask:
+ case X86::BI__builtin_ia32_vcvttph2ibs512_mask:
+ case X86::BI__builtin_ia32_vcvttph2iubs512_mask:
+ case X86::BI__builtin_ia32_vcvttps2ibs512_mask:
+ case X86::BI__builtin_ia32_vcvttps2iubs512_mask:
ArgNum = 3;
break;
case X86::BI__builtin_ia32_cmppd512_mask:
@@ -302,6 +310,14 @@ bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vcvtph2uqq256_round_mask:
case X86::BI__builtin_ia32_vcvtqq2ph256_round_mask:
case X86::BI__builtin_ia32_vcvtuqq2ph256_round_mask:
+ case X86::BI__builtin_ia32_vcvtph2ibs256_mask:
+ case X86::BI__builtin_ia32_vcvtph2iubs256_mask:
+ case X86::BI__builtin_ia32_vcvtps2ibs256_mask:
+ case X86::BI__builtin_ia32_vcvtps2iubs256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ibs512_mask:
+ case X86::BI__builtin_ia32_vcvtph2iubs512_mask:
+ case X86::BI__builtin_ia32_vcvtps2ibs512_mask:
+ case X86::BI__builtin_ia32_vcvtps2iubs512_mask:
ArgNum = 3;
HasRC = true;
break;
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index bf38e59..8f6f304 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -2079,10 +2079,11 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
+ OMPClause *RebuildOMPNumTeamsClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().OpenMP().ActOnOpenMPNumTeamsClause(NumTeams, StartLoc,
+ return getSema().OpenMP().ActOnOpenMPNumTeamsClause(VarList, StartLoc,
LParenLoc, EndLoc);
}
@@ -4028,6 +4029,20 @@ public:
NumExpansions);
}
+ ExprResult RebuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
+ LambdaScopeInfo *LSI) {
+ for (ParmVarDecl *PVD : LSI->CallOperator->parameters()) {
+ if (Expr *Init = PVD->getInit())
+ LSI->ContainsUnexpandedParameterPack |=
+ Init->containsUnexpandedParameterPack();
+ else if (PVD->hasUninstantiatedDefaultArg())
+ LSI->ContainsUnexpandedParameterPack |=
+ PVD->getUninstantiatedDefaultArg()
+ ->containsUnexpandedParameterPack();
+ }
+ return getSema().BuildLambdaExpr(StartLoc, EndLoc, LSI);
+ }
+
/// Build an empty C++1z fold-expression with the given operator.
///
/// By default, produces the fallback value for the fold-expression, or
@@ -5825,7 +5840,8 @@ QualType TreeTransform<Derived>::TransformDependentAddressSpaceType(
TypeLocBuilder &TLB, DependentAddressSpaceTypeLoc TL) {
const DependentAddressSpaceType *T = TL.getTypePtr();
- QualType pointeeType = getDerived().TransformType(T->getPointeeType());
+ QualType pointeeType =
+ getDerived().TransformType(TLB, TL.getPointeeTypeLoc());
if (pointeeType.isNull())
return QualType();
@@ -5858,9 +5874,7 @@ QualType TreeTransform<Derived>::TransformDependentAddressSpaceType(
NewTL.setAttrNameLoc(TL.getAttrNameLoc());
} else {
- TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(
- Result, getDerived().getBaseLocation());
- TransformType(TLB, DI->getTypeLoc());
+ TLB.TypeWasModifiedSafely(Result);
}
return Result;
@@ -8284,6 +8298,7 @@ StmtResult
TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
bool DeclChanged = false;
SmallVector<Decl *, 4> Decls;
+ LambdaScopeInfo *LSI = getSema().getCurLambda();
for (auto *D : S->decls()) {
Decl *Transformed = getDerived().TransformDefinition(D->getLocation(), D);
if (!Transformed)
@@ -8292,6 +8307,15 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
if (Transformed != D)
DeclChanged = true;
+ if (LSI && isa<TypeDecl>(Transformed))
+ LSI->ContainsUnexpandedParameterPack |=
+ getSema()
+ .getASTContext()
+ .getTypeDeclType(cast<TypeDecl>(Transformed))
+ .getCanonicalType()
+ .getTypePtr()
+ ->containsUnexpandedParameterPack();
+
Decls.push_back(Transformed);
}
@@ -10662,7 +10686,7 @@ TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
/*ADL=*/true, Decls.begin(), Decls.end(),
- /*KnownDependent=*/false));
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10709,7 +10733,7 @@ OMPClause *TreeTransform<Derived>::TransformOMPTaskReductionClause(
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
/*ADL=*/true, Decls.begin(), Decls.end(),
- /*KnownDependent=*/false));
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10755,7 +10779,7 @@ TreeTransform<Derived>::TransformOMPInReductionClause(OMPInReductionClause *C) {
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
/*ADL=*/true, Decls.begin(), Decls.end(),
- /*KnownDependent=*/false));
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10937,7 +10961,7 @@ bool transformOMPMappableExprListClause(
TT.getSema().Context, /*NamingClass=*/nullptr,
MapperIdScopeSpec.getWithLocInContext(TT.getSema().Context),
MapperIdInfo, /*ADL=*/true, Decls.begin(), Decls.end(),
- /*KnownDependent=*/false));
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else {
UnresolvedMappers.push_back(nullptr);
}
@@ -10994,7 +11018,7 @@ TreeTransform<Derived>::TransformOMPAllocateClause(OMPAllocateClause *C) {
template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPNumTeamsClause(OMPNumTeamsClause *C) {
- ExprResult E = getDerived().TransformExpr(C->getNumTeams());
+ ExprResult E = getDerived().TransformExpr(C->getNumTeams().front());
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPNumTeamsClause(
@@ -14523,7 +14547,6 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
CXXMethodDecl *NewCallOperator =
getSema().CreateLambdaCallOperator(E->getIntroducerRange(), Class);
- NewCallOperator->setLexicalDeclContext(getSema().CurContext);
// Enter the scope of the lambda.
getSema().buildLambdaScope(LSI, NewCallOperator, E->getIntroducerRange(),
@@ -14591,6 +14614,13 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
NewVDs.push_back(NewVD);
getSema().addInitCapture(LSI, NewVD, C->getCaptureKind() == LCK_ByRef);
+ // Cases we want to tackle:
+ // ([C(Pack)] {}, ...)
+ // But rule out cases e.g.
+ // [...C = Pack()] {}
+ if (NewC.EllipsisLoc.isInvalid())
+ LSI->ContainsUnexpandedParameterPack |=
+ Init.get()->containsUnexpandedParameterPack();
}
if (Invalid)
@@ -14658,6 +14688,11 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
continue;
}
+ // This is not an init-capture; however it contains an unexpanded pack e.g.
+ // ([Pack] {}(), ...)
+ if (auto *VD = dyn_cast<VarDecl>(CapturedVar); VD && !C->isPackExpansion())
+ LSI->ContainsUnexpandedParameterPack |= VD->isParameterPack();
+
// Capture the transformed variable.
getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind,
EllipsisLoc);
@@ -14669,9 +14704,12 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
auto TPL = getDerived().TransformTemplateParameterList(
E->getTemplateParameterList());
LSI->GLTemplateParameterList = TPL;
- if (TPL)
+ if (TPL) {
getSema().AddTemplateParametersToLambdaCallOperator(NewCallOperator, Class,
TPL);
+ LSI->ContainsUnexpandedParameterPack |=
+ TPL->containsUnexpandedParameterPack();
+ }
// Transform the type of the original lambda's call operator.
// The transformation MUST be done in the CurrentInstantiationScope since
@@ -14710,6 +14748,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
if (NewCallOpType.isNull())
return ExprError();
+ LSI->ContainsUnexpandedParameterPack |=
+ NewCallOpType->containsUnexpandedParameterPack();
NewCallOpTSI =
NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context, NewCallOpType);
}
@@ -14824,8 +14864,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
Class->setTypeForDecl(nullptr);
getSema().Context.getTypeDeclType(Class);
- return getSema().BuildLambdaExpr(E->getBeginLoc(), Body.get()->getEndLoc(),
- &LSICopy);
+ return getDerived().RebuildLambdaExpr(E->getBeginLoc(),
+ Body.get()->getEndLoc(), &LSICopy);
}
template<typename Derived>
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index fccdece..ad9cc9f 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -104,6 +104,7 @@
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -10596,7 +10597,7 @@ OMPClause *OMPClauseReader::readClause() {
break;
}
case llvm::omp::OMPC_num_teams:
- C = new (Context) OMPNumTeamsClause();
+ C = OMPNumTeamsClause::CreateEmpty(Context, Record.readInt());
break;
case llvm::omp::OMPC_thread_limit:
C = new (Context) OMPThreadLimitClause();
@@ -11418,8 +11419,15 @@ void OMPClauseReader::VisitOMPAllocateClause(OMPAllocateClause *C) {
void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
VisitOMPClauseWithPreInit(C);
- C->setNumTeams(Record.readSubExpr());
C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for ([[maybe_unused]] auto _ : llvm::seq<unsigned>(NumVars)) {
+ (void)_;
+ Vars.push_back(Record.readSubExpr());
+ }
+ C->setVarRefs(Vars);
}
void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index c167bb8..3082b98 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -7556,9 +7556,11 @@ void OMPClauseWriter::VisitOMPAllocateClause(OMPAllocateClause *C) {
}
void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ Record.push_back(C->varlist_size());
VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getNumTeams());
Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlist())
+ Record.AddStmt(VE);
}
void OMPClauseWriter::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index 612bd552..98cd172 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -114,7 +114,7 @@ static_assert(*(&arr[0]) == 1, "");
static_assert(*(&arr[1]) == 2, "");
constexpr const int *OOB = (arr + 3) - 3; // both-error {{must be initialized by a constant expression}} \
- // both-note {{cannot refer to element 3 of array of 2}}
+ // both-note {{cannot refer to element 3 of array of 2 elements}}
template<typename T>
constexpr T getElementOf(T* array, int i) {
diff --git a/clang/test/AST/Interp/builtin-functions.cpp b/clang/test/AST/Interp/builtin-functions.cpp
index 0a17106..b179298 100644
--- a/clang/test/AST/Interp/builtin-functions.cpp
+++ b/clang/test/AST/Interp/builtin-functions.cpp
@@ -866,11 +866,11 @@ namespace convertvector {
constexpr vector8BitInt128 from_vector8BitInt128_to_vector8BitInt128_var =
__builtin_convertvector((vector8BitInt128){0, 1, 2, 3, 4, 5, 6, 7},
vector8BitInt128);
- static_assert(from_vector8BitInt128_to_vector8BitInt128_var[0] == 0, ""); // ref-error {{not an integral constant expression}}
- static_assert(from_vector8BitInt128_to_vector8BitInt128_var[1] == 1, ""); // ref-error {{not an integral constant expression}}
- static_assert(from_vector8BitInt128_to_vector8BitInt128_var[2] == 2, ""); // ref-error {{not an integral constant expression}}
- static_assert(from_vector8BitInt128_to_vector8BitInt128_var[3] == 3, ""); // ref-error {{not an integral constant expression}}
- static_assert(from_vector8BitInt128_to_vector8BitInt128_var[4] == 4, ""); // ref-error {{not an integral constant expression}}
+ static_assert(from_vector8BitInt128_to_vector8BitInt128_var[0] == 0, "");
+ static_assert(from_vector8BitInt128_to_vector8BitInt128_var[1] == 1, "");
+ static_assert(from_vector8BitInt128_to_vector8BitInt128_var[2] == 2, "");
+ static_assert(from_vector8BitInt128_to_vector8BitInt128_var[3] == 3, "");
+ static_assert(from_vector8BitInt128_to_vector8BitInt128_var[4] == 4, "");
}
namespace shufflevector {
@@ -890,14 +890,14 @@ namespace shufflevector {
constexpr vector8char vectorShuffle6 = __builtin_shufflevector(
vector4charConst1, vector4charConst2, 0, 2, 4, 6, 1, 3, 5, 7);
- static_assert(vectorShuffle6[0] == 0, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[1] == 2, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[2] == 4, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[3] == 6, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[4] == 1, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[5] == 3, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[6] == 5, "");// ref-error {{not an integral constant expression}}
- static_assert(vectorShuffle6[7] == 7, "");// ref-error {{not an integral constant expression}}
+ static_assert(vectorShuffle6[0] == 0, "");
+ static_assert(vectorShuffle6[1] == 2, "");
+ static_assert(vectorShuffle6[2] == 4, "");
+ static_assert(vectorShuffle6[3] == 6, "");
+ static_assert(vectorShuffle6[4] == 1, "");
+ static_assert(vectorShuffle6[5] == 3, "");
+ static_assert(vectorShuffle6[6] == 5, "");
+ static_assert(vectorShuffle6[7] == 7, "");
constexpr vector4char vectorShuffleFail1 = __builtin_shufflevector( // both-error {{must be initialized by a constant expression}}\
// ref-error {{index for __builtin_shufflevector not within the bounds of the input vectors; index of -1 found at position 0 is not permitted in a constexpr context}}
diff --git a/clang/test/AST/Interp/c.c b/clang/test/AST/Interp/c.c
index 9ec305d..13a5e08 100644
--- a/clang/test/AST/Interp/c.c
+++ b/clang/test/AST/Interp/c.c
@@ -101,8 +101,6 @@ int somefunc(int i) {
// all-warning {{overflow in expression; result is 131'073 with type 'int'}}
}
-/// FIXME: The following test is incorrect in the new interpreter.
-/// The null pointer returns 16 from its getIntegerRepresentation().
#pragma clang diagnostic ignored "-Wpointer-to-int-cast"
struct ArrayStruct {
char n[1];
@@ -111,10 +109,7 @@ char name2[(int)&((struct ArrayStruct*)0)->n]; // expected-warning {{folded to c
// pedantic-expected-warning {{folded to constant array}} \
// ref-warning {{folded to constant array}} \
// pedantic-ref-warning {{folded to constant array}}
-_Static_assert(sizeof(name2) == 0, ""); // expected-error {{failed}} \
- // expected-note {{evaluates to}} \
- // pedantic-expected-error {{failed}} \
- // pedantic-expected-note {{evaluates to}}
+_Static_assert(sizeof(name2) == 0, "");
#ifdef __SIZEOF_INT128__
void *PR28739d = &(&PR28739d)[(__int128)(unsigned long)-1]; // all-warning {{refers past the last possible element}}
diff --git a/clang/test/AST/Interp/cxx20.cpp b/clang/test/AST/Interp/cxx20.cpp
index da80454..27dbd28 100644
--- a/clang/test/AST/Interp/cxx20.cpp
+++ b/clang/test/AST/Interp/cxx20.cpp
@@ -841,3 +841,20 @@ namespace VariadicCallOperator {
}
constexpr int A = foo();
}
+
+namespace DefinitionLoc {
+
+ struct NonConstexprCopy {
+ constexpr NonConstexprCopy() = default;
+ NonConstexprCopy(const NonConstexprCopy &);
+ constexpr NonConstexprCopy(NonConstexprCopy &&) = default;
+
+ int n = 42;
+ };
+
+ NonConstexprCopy::NonConstexprCopy(const NonConstexprCopy &) = default; // both-note {{here}}
+
+ constexpr NonConstexprCopy ncc1 = NonConstexprCopy(NonConstexprCopy());
+ constexpr NonConstexprCopy ncc2 = ncc1; // both-error {{constant expression}} \
+ // both-note {{non-constexpr constructor}}
+}
diff --git a/clang/test/AST/Interp/objc.mm b/clang/test/AST/Interp/objc.mm
index 6402c8a..f6c4ba1 100644
--- a/clang/test/AST/Interp/objc.mm
+++ b/clang/test/AST/Interp/objc.mm
@@ -11,3 +11,28 @@
@end
constexpr NSString *t0 = @"abc";
constexpr NSString *t1 = @("abc");
+
+
+#if __LP64__
+typedef unsigned long NSUInteger;
+typedef long NSInteger;
+#else
+typedef unsigned int NSUInteger;
+typedef int NSInteger;
+#endif
+
+
+@class NSNumber;
+
+
+@interface NSObject
++ (NSObject*)nsobject;
+@end
+
+@interface NSNumber : NSObject
++ (NSNumber *)numberWithInt:(int)value;
+@end
+
+int main(void) {
+ NSNumber *bv = @(1391126400 * 1000); // both-warning {{overflow in expression; result is -443'003'904 with type 'int'}}
+}
diff --git a/clang/test/AST/Interp/vectors.cpp b/clang/test/AST/Interp/vectors.cpp
index 6991a34..cf10d94 100644
--- a/clang/test/AST/Interp/vectors.cpp
+++ b/clang/test/AST/Interp/vectors.cpp
@@ -3,25 +3,24 @@
typedef int __attribute__((vector_size(16))) VI4;
constexpr VI4 A = {1,2,3,4};
-static_assert(A[0] == 1, ""); // ref-error {{not an integral constant expression}}
-static_assert(A[1] == 2, ""); // ref-error {{not an integral constant expression}}
-static_assert(A[2] == 3, ""); // ref-error {{not an integral constant expression}}
-static_assert(A[3] == 4, ""); // ref-error {{not an integral constant expression}}
+static_assert(A[0] == 1, "");
+static_assert(A[1] == 2, "");
+static_assert(A[2] == 3, "");
+static_assert(A[3] == 4, "");
/// FIXME: It would be nice if the note said 'vector' instead of 'array'.
-static_assert(A[12] == 4, ""); // ref-error {{not an integral constant expression}} \
- // expected-error {{not an integral constant expression}} \
- // expected-note {{cannot refer to element 12 of array of 4 elements in a constant expression}}
+static_assert(A[12] == 4, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{cannot refer to element 12 of array of 4 elements in a constant expression}}
/// VectorSplat casts
typedef __attribute__(( ext_vector_type(4) )) float float4;
constexpr float4 vec4_0 = (float4)0.5f;
-static_assert(vec4_0[0] == 0.5, ""); // ref-error {{not an integral constant expression}}
-static_assert(vec4_0[1] == 0.5, ""); // ref-error {{not an integral constant expression}}
-static_assert(vec4_0[2] == 0.5, ""); // ref-error {{not an integral constant expression}}
-static_assert(vec4_0[3] == 0.5, ""); // ref-error {{not an integral constant expression}}
+static_assert(vec4_0[0] == 0.5, "");
+static_assert(vec4_0[1] == 0.5, "");
+static_assert(vec4_0[2] == 0.5, "");
+static_assert(vec4_0[3] == 0.5, "");
constexpr int vec4_0_discarded = ((float4)12.0f, 0);
@@ -29,14 +28,14 @@ constexpr int vec4_0_discarded = ((float4)12.0f, 0);
constexpr float4 arr4[2] = {
{1,2,3,4},
};
-static_assert(arr4[0][0] == 1, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[0][1] == 2, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[0][2] == 3, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[0][3] == 4, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[1][0] == 0, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[1][0] == 0, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[1][0] == 0, ""); // ref-error {{not an integral constant expression}}
-static_assert(arr4[1][0] == 0, ""); // ref-error {{not an integral constant expression}}
+static_assert(arr4[0][0] == 1, "");
+static_assert(arr4[0][1] == 2, "");
+static_assert(arr4[0][2] == 3, "");
+static_assert(arr4[0][3] == 4, "");
+static_assert(arr4[1][0] == 0, "");
+static_assert(arr4[1][0] == 0, "");
+static_assert(arr4[1][0] == 0, "");
+static_assert(arr4[1][0] == 0, "");
/// From constant-expression-cxx11.cpp
@@ -65,10 +64,10 @@ namespace {
namespace BoolToSignedIntegralCast{
typedef __attribute__((__ext_vector_type__(4))) unsigned int int4;
constexpr int4 intsT = (int4)true;
- static_assert(intsT[0] == -1, "");// ref-error {{not an integral constant expression}}
- static_assert(intsT[1] == -1, "");// ref-error {{not an integral constant expression}}
- static_assert(intsT[2] == -1, "");// ref-error {{not an integral constant expression}}
- static_assert(intsT[3] == -1, "");// ref-error {{not an integral constant expression}}
+ static_assert(intsT[0] == -1, "");
+ static_assert(intsT[1] == -1, "");
+ static_assert(intsT[2] == -1, "");
+ static_assert(intsT[3] == -1, "");
}
namespace VectorElementExpr {
@@ -78,8 +77,8 @@ namespace VectorElementExpr {
static_assert(oneElt == 3);
constexpr int2 twoElts = ((int4){11, 22, 33, 44}).yz;
- static_assert(twoElts.x == 22, ""); // ref-error {{not an integral constant expression}}
- static_assert(twoElts.y == 33, ""); // ref-error {{not an integral constant expression}}
+ static_assert(twoElts.x == 22, "");
+ static_assert(twoElts.y == 33, "");
}
namespace Temporaries {
@@ -91,3 +90,14 @@ namespace Temporaries {
};
int &&s = S().w[1];
}
+
+#ifdef __SIZEOF_INT128__
+namespace bigint {
+ typedef __attribute__((__ext_vector_type__(4))) __int128 bigint4;
+ constexpr bigint4 A = (bigint4)true;
+ static_assert(A[0] == -1, "");
+ static_assert(A[1] == -1, "");
+ static_assert(A[2] == -1, "");
+ static_assert(A[3] == -1, "");
+}
+#endif
diff --git a/clang/test/CXX/temp/temp.spec/temp.expl.spec/p14-23.cpp b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p14-23.cpp
index dc17cea..a023bf8 100644
--- a/clang/test/CXX/temp/temp.spec/temp.expl.spec/p14-23.cpp
+++ b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p14-23.cpp
@@ -1,60 +1,90 @@
// RUN: %clang_cc1 -std=c++20 -verify %s
-template<int I>
-concept C = I >= 4;
+namespace N0 {
+ template<int I>
+ concept C = I >= 4;
-template<int I>
-concept D = I < 8;
+ template<int I>
+ concept D = I < 8;
-template<int I>
-struct A {
- constexpr static int f() { return 0; }
- constexpr static int f() requires C<I> && D<I> { return 1; }
- constexpr static int f() requires C<I> { return 2; }
+ template<int I>
+ struct A {
+ constexpr static int f() { return 0; }
+ constexpr static int f() requires C<I> && D<I> { return 1; }
+ constexpr static int f() requires C<I> { return 2; }
- constexpr static int g() requires C<I> { return 0; } // #candidate-0
- constexpr static int g() requires D<I> { return 1; } // #candidate-1
+ constexpr static int g() requires C<I> { return 0; } // #candidate-0
+ constexpr static int g() requires D<I> { return 1; } // #candidate-1
- constexpr static int h() requires C<I> { return 0; } // expected-note {{member declaration nearly matches}}
-};
+ constexpr static int h() requires C<I> { return 0; } // expected-note {{member declaration nearly matches}}
+ };
-template<>
-constexpr int A<2>::f() { return 3; }
+ template<>
+ constexpr int A<2>::f() { return 3; }
-template<>
-constexpr int A<4>::f() { return 4; }
+ template<>
+ constexpr int A<4>::f() { return 4; }
-template<>
-constexpr int A<8>::f() { return 5; }
+ template<>
+ constexpr int A<8>::f() { return 5; }
-static_assert(A<3>::f() == 0);
-static_assert(A<5>::f() == 1);
-static_assert(A<9>::f() == 2);
-static_assert(A<2>::f() == 3);
-static_assert(A<4>::f() == 4);
-static_assert(A<8>::f() == 5);
+ static_assert(A<3>::f() == 0);
+ static_assert(A<5>::f() == 1);
+ static_assert(A<9>::f() == 2);
+ static_assert(A<2>::f() == 3);
+ static_assert(A<4>::f() == 4);
+ static_assert(A<8>::f() == 5);
-template<>
-constexpr int A<0>::g() { return 2; }
+ template<>
+ constexpr int A<0>::g() { return 2; }
-template<>
-constexpr int A<8>::g() { return 3; }
+ template<>
+ constexpr int A<8>::g() { return 3; }
-template<>
-constexpr int A<6>::g() { return 4; } // expected-error {{ambiguous member function specialization 'A<6>::g' of 'A::g'}}
- // expected-note@#candidate-0 {{member function specialization matches 'g'}}
- // expected-note@#candidate-1 {{member function specialization matches 'g'}}
+ template<>
+ constexpr int A<6>::g() { return 4; } // expected-error {{ambiguous member function specialization 'N0::A<6>::g' of 'N0::A::g'}}
+ // expected-note@#candidate-0 {{member function specialization matches 'g'}}
+ // expected-note@#candidate-1 {{member function specialization matches 'g'}}
-static_assert(A<9>::g() == 0);
-static_assert(A<1>::g() == 1);
-static_assert(A<0>::g() == 2);
-static_assert(A<8>::g() == 3);
+ static_assert(A<9>::g() == 0);
+ static_assert(A<1>::g() == 1);
+ static_assert(A<0>::g() == 2);
+ static_assert(A<8>::g() == 3);
-template<>
-constexpr int A<4>::h() { return 1; }
+ template<>
+ constexpr int A<4>::h() { return 1; }
-template<>
-constexpr int A<0>::h() { return 2; } // expected-error {{out-of-line definition of 'h' does not match any declaration in 'A<0>'}}
+ template<>
+ constexpr int A<0>::h() { return 2; } // expected-error {{out-of-line definition of 'h' does not match any declaration in 'N0::A<0>'}}
-static_assert(A<5>::h() == 0);
-static_assert(A<4>::h() == 1);
+ static_assert(A<5>::h() == 0);
+ static_assert(A<4>::h() == 1);
+} // namespace N0
+
+namespace N1 {
+ template<int I>
+ concept C = I > 0;
+
+ template<int I>
+ concept D = I > 1;
+
+ template<int I>
+ concept E = I > 2;
+
+ template<int I>
+ struct A {
+ void f() requires C<I> && D<I>; // expected-note {{member function specialization matches 'f'}}
+ void f() requires C<I> && E<I>; // expected-note {{member function specialization matches 'f'}}
+ void f() requires C<I> && D<I> && true; // expected-note {{member function specialization matches 'f'}}
+
+ void g() requires C<I> && E<I>; // expected-note {{member function specialization matches 'g'}}
+ void g() requires C<I> && D<I>; // expected-note {{member function specialization matches 'g'}}
+ void g() requires C<I> && D<I> && true; // expected-note {{member function specialization matches 'g'}}
+ };
+
+ template<>
+ void A<3>::f(); // expected-error {{ambiguous member function specialization 'N1::A<3>::f' of 'N1::A::f'}}
+
+ template<>
+ void A<3>::g(); // expected-error {{ambiguous member function specialization 'N1::A<3>::g' of 'N1::A::g'}}
+} // namespace N1
diff --git a/clang/test/CodeGen/PowerPC/builtins-bcd-assist.c b/clang/test/CodeGen/PowerPC/builtins-bcd-assist.c
new file mode 100644
index 0000000..f346bcf
--- /dev/null
+++ b/clang/test/CodeGen/PowerPC/builtins-bcd-assist.c
@@ -0,0 +1,58 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -triple powerpc64le-unknown-linux -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64-unknown-aix -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc-unknown-aix -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+
+// CHECK-LABEL: define{{.*}} i64 @cdtbcd_test(i64
+// CHECK: [[CONV:%.*]] = trunc i64 {{.*}} to i32
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.ppc.cdtbcd(i32 [[CONV]])
+// CHECK-NEXT: [[CONV1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: ret i64 [[CONV1]]
+long long cdtbcd_test(long long ll) {
+ return __builtin_cdtbcd (ll);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @cdtbcd_test_ui(i32
+// CHECK: [[TMP0:%.*]] = tail call i32 @llvm.ppc.cdtbcd(i32
+// CHECK-NEXT: ret i32 [[TMP0]]
+unsigned int cdtbcd_test_ui(unsigned int ui) {
+ return __builtin_cdtbcd (ui);
+}
+
+// CHECK-LABEL: define{{.*}} i64 @cbcdtd_test(i64
+// CHECK: [[CONV:%.*]] = trunc i64 {{.*}} to i32
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.ppc.cbcdtd(i32 [[CONV]])
+// CHECK-NEXT: [[CONV1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: ret i64 [[CONV1]]
+long long cbcdtd_test(long long ll) {
+ return __builtin_cbcdtd (ll);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @cbcdtd_test_ui(i32
+// CHECK: [[TMP0:%.*]] = tail call i32 @llvm.ppc.cbcdtd(i32
+// CHECK-NEXT: ret i32 [[TMP0]]
+unsigned int cbcdtd_test_ui(unsigned int ui) {
+ return __builtin_cbcdtd (ui);
+}
+
+// CHECK-LABEL: define{{.*}} i64 @addg6s_test(i64
+// CHECK: [[CONV:%.*]] = trunc i64 {{.*}} to i32
+// CHECK-NEXT: [[CONV1:%.*]] = trunc i64 {{.*}} to i32
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.ppc.addg6s(i32 [[CONV]], i32 [[CONV1]])
+// CHECK-NEXT: [[CONV2:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: ret i64 [[CONV2]]
+//
+long long addg6s_test(long long ll, long long ll2) {
+ return __builtin_addg6s (ll, ll2);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @addg6s_test_ui(i32
+// CHECK: [[TMP0:%.*]] = tail call i32 @llvm.ppc.addg6s(i32 {{.*}}, i32
+// CHECK-NEXT: ret i32 [[TMP0]]
+unsigned int addg6s_test_ui(unsigned int ui, unsigned int ui2) {
+ return __builtin_addg6s (ui, ui2);
+}
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.c b/clang/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.c
new file mode 100644
index 0000000..79d2da2
--- /dev/null
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -triple powerpc64le-unknown-linux -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64-unknown-aix -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - | FileCheck %s
+// RUN: not %clang_cc1 -triple powerpc-unknown-aix -O2 -target-cpu pwr7 \
+// RUN: -emit-llvm %s -o - 2>&1 | FileCheck %s --check-prefix=CHECK-32-ERROR
+
+// CHECK-LABEL: define{{.*}} i64 @cdtbcd_test(i64
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.cdtbcdd(i64
+// CHECK-NEXT: ret i64 [[TMP0]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __cdtbcd __builtin_ppc_cdtbcd
+long long cdtbcd_test(long long ll) {
+ return __cdtbcd (ll);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @cdtbcd_test_ui(i32
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[CONV:%.*]] = zext i32 {{.*}} to i64
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.cdtbcdd(i64 [[CONV]])
+// CHECK-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK-NEXT: ret i32 [[CONV1]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __cdtbcd __builtin_ppc_cdtbcd
+unsigned int cdtbcd_test_ui(unsigned int ui) {
+ return __cdtbcd (ui);
+}
+
+// CHECK-LABEL: define{{.*}} i64 @cbcdtd_test(i64
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.cbcdtdd(i64
+// CHECK-NEXT: ret i64 [[TMP0]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __cbcdtd __builtin_ppc_cbcdtd
+long long cbcdtd_test(long long ll) {
+ return __cbcdtd (ll);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @cbcdtd_test_ui(i32
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[CONV:%.*]] = zext i32 {{.*}} to i64
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.cbcdtdd(i64 [[CONV]])
+// CHECK-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK-NEXT: ret i32 [[CONV1]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __cbcdtd __builtin_ppc_cbcdtd
+unsigned int cbcdtd_test_ui(unsigned int ui) {
+ return __cbcdtd (ui);
+}
+
+// CHECK-LABEL: define{{.*}} i64 @addg6s_test(i64
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.addg6sd(i64 {{.*}}, i64 {{.*}})
+// CHECK-NEXT: ret i64 [[TMP0]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __addg6s __builtin_ppc_addg6s
+long long addg6s_test(long long ll, long long ll2) {
+ return __addg6s (ll, ll2);
+}
+
+// CHECK-LABEL: define{{.*}} i32 @addg6s_test_ui(i32
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[CONV:%.*]] = zext i32 {{.*}} to i64
+// CHECK-NEXT: [[CONV1:%.*]] = zext i32 {{.*}} to i64
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.ppc.addg6sd(i64 {{.*}}, i64
+// CHECK-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK-NEXT: ret i32 [[CONV2]]
+// CHECK-32-ERROR: error: this builtin is only available on 64-bit targets
+// CHECK-32-ERROR: #define __addg6s __builtin_ppc_addg6s
+unsigned int addg6s_test_ui(unsigned int ui, unsigned int ui2) {
+ return __addg6s (ui, ui2);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
index 43ed94b..31b7da6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
return __riscv_vcompress_vm_u64m8(src, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16mf4(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16mf2(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m1(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m2(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m4(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m8(src, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
index fb41a07..d2eb01c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1037,3 +1037,62 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t
return __riscv_vmerge_vvm_f64m8(op1, op2, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmerge_vvm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmerge_vvm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmerge_vvm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmerge_vvm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmerge_vvm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmerge_vvm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m8(op1, op2, mask, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
index c25719a..7004c18 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1917,3 +1917,63 @@ vuint64m8_t test_vmv_s_x_u64m8(uint64_t src, size_t vl) {
return __riscv_vmv_s_x_u64m8(src, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmv_v_v_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmv_v_v_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmv_v_v_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmv_v_v_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmv_v_v_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmv_v_v_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m8(src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
index 5da76dd..bb73d62 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde
return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m1_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m1_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m8_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m8_m(mask, op1, index, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
index b272903..bb030ee 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
return __riscv_vcompress(src, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return __riscv_vcompress(src, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
index d67aa70..2db27fdc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t
return __riscv_vmerge(op1, op2, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmerge_vvm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmerge_vvm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmerge_vvm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmerge_vvm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmerge_vvm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmerge_vvm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return __riscv_vmerge(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
index 44f34c4..7b1fc33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1037,3 +1037,63 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
return __riscv_vmv_x(src);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmv_v_v_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmv_v_v_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmv_v_v_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmv_v_v_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmv_v_v_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmv_v_v_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t src, size_t vl) {
+ return __riscv_vmv_v(src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
index 0ea6382..c6f9cb4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde
return __riscv_vrgather(mask, op1, index, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather(mask, op1, index, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
index a39e76c..97440d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v
return __riscv_vcompress_vm_u64m8_tu(maskedoff, src, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16mf4_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16mf2_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m1_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m2_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m4_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return __riscv_vcompress_vm_bf16m8_tu(maskedoff, src, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
index 4f723c57..7667c37 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
return __riscv_vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmerge_vvm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmerge_vvm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmerge_vvm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmerge_vvm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmerge_vvm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmerge_vvm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return __riscv_vmerge_vvm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c
index 2a5a0f4..123aace 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1477,3 +1477,63 @@ vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl
return __riscv_vmv_s_x_u64m8_tu(maskedoff, src, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmv_v_v_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmv_v_v_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmv_v_v_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmv_v_v_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmv_v_v_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmv_v_v_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, size_t vl) {
+ return __riscv_vmv_v_v_bf16m8_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
index dec79b7..3c4316d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin
return __riscv_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m1_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m1_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m8_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m8_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16mf2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16mf2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m1_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m1_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_vv_bf16m8_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_bf16m8_mu(mask, maskedoff, op1, index, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
index 0de71a7..e1525e5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v
return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
index 8149be4..ba1838b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmerge_vvm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmerge_vvm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmerge_vvm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmerge_vvm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmerge_vvm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmerge_vvm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
index ac95c77..2f15b66 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfhmin -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -1477,3 +1477,63 @@ vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl
return __riscv_vmv_s_tu(maskedoff, src, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vmv_v_v_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vmv_v_v_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vmv_v_v_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vmv_v_v_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vmv_v_v_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vmv_v_v_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, size_t vl) {
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
index f3ba55c..a094e84 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
@@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin
return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins-error.c b/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins-error.c
new file mode 100755
index 0000000..81bf591
--- /dev/null
+++ b/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins-error.c
@@ -0,0 +1,198 @@
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64 -target-feature +avx10.2-512 \
+// RUN: -Wall -Werror -verify
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386 -target-feature +avx10.2-512 \
+// RUN: -Wall -Werror -verify
+
+#include <immintrin.h>
+
+__m512i test_mm512_ipcvt_roundph_epi8(__m512h __A) {
+ return _mm512_ipcvt_roundph_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvt_roundph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ return _mm512_mask_ipcvt_roundph_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvt_roundph_epi8(__mmask32 __A, __m512h __B) {
+ return _mm512_maskz_ipcvt_roundph_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvt_roundph_epu8(__m512h __A) {
+ return _mm512_ipcvt_roundph_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvt_roundph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ return _mm512_mask_ipcvt_roundph_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvt_roundph_epu8(__mmask32 __A, __m512h __B) {
+ return _mm512_maskz_ipcvt_roundph_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvt_roundps_epi8(__m512 __A) {
+ return _mm512_ipcvt_roundps_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvt_roundps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ return _mm512_mask_ipcvt_roundps_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvt_roundps_epi8(__mmask16 __A, __m512 __B) {
+ return _mm512_maskz_ipcvt_roundps_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvt_roundps_epu8(__m512 __A) {
+ return _mm512_ipcvt_roundps_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvt_roundps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ return _mm512_mask_ipcvt_roundps_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvt_roundps_epu8(__mmask16 __A, __m512 __B) {
+ return _mm512_maskz_ipcvt_roundps_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvtt_roundph_epi8(__m512h __A) {
+ return _mm512_ipcvtt_roundph_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvtt_roundph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ return _mm512_mask_ipcvtt_roundph_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundph_epi8(__mmask32 __A, __m512h __B) {
+ return _mm512_maskz_ipcvtt_roundph_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvtt_roundph_epu8(__m512h __A) {
+ return _mm512_ipcvtt_roundph_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvtt_roundph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ return _mm512_mask_ipcvtt_roundph_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundph_epu8(__mmask32 __A, __m512h __B) {
+ return _mm512_maskz_ipcvtt_roundph_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvtt_roundps_epi8(__m512 __A) {
+ return _mm512_ipcvtt_roundps_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvtt_roundps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ return _mm512_mask_ipcvtt_roundps_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundps_epi8(__mmask16 __A, __m512 __B) {
+ return _mm512_maskz_ipcvtt_roundps_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_ipcvtt_roundps_epu8(__m512 __A) {
+ return _mm512_ipcvtt_roundps_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_mask_ipcvtt_roundps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ return _mm512_mask_ipcvtt_roundps_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundps_epu8(__mmask16 __A, __m512 __B) {
+ return _mm512_maskz_ipcvtt_roundps_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvt_roundph_epi8(__m256h __A) {
+ return _mm256_ipcvt_roundph_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvt_roundph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ return _mm256_mask_ipcvt_roundph_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvt_roundph_epi8(__mmask16 __A, __m256h __B) {
+ return _mm256_maskz_ipcvt_roundph_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvt_roundph_epu8(__m256h __A) {
+ return _mm256_ipcvt_roundph_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvt_roundph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ return _mm256_mask_ipcvt_roundph_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvt_roundph_epu8(__mmask16 __A, __m256h __B) {
+ return _mm256_maskz_ipcvt_roundph_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvt_roundps_epi8(__m256 __A) {
+ return _mm256_ipcvt_roundps_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvt_roundps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ return _mm256_mask_ipcvt_roundps_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvt_roundps_epi8(__mmask8 __A, __m256 __B) {
+ return _mm256_maskz_ipcvt_roundps_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvt_roundps_epu8(__m256 __A) {
+ return _mm256_ipcvt_roundps_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvt_roundps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ return _mm256_mask_ipcvt_roundps_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvt_roundps_epu8(__mmask8 __A, __m256 __B) {
+ return _mm256_maskz_ipcvt_roundps_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvtt_roundph_epi8(__m256h __A) {
+ return _mm256_ipcvtt_roundph_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvtt_roundph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ return _mm256_mask_ipcvtt_roundph_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundph_epi8(__mmask16 __A, __m256h __B) {
+ return _mm256_maskz_ipcvtt_roundph_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvtt_roundph_epu8(__m256h __A) {
+ return _mm256_ipcvtt_roundph_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvtt_roundph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ return _mm256_mask_ipcvtt_roundph_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundph_epu8(__mmask16 __A, __m256h __B) {
+ return _mm256_maskz_ipcvtt_roundph_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvtt_roundps_epi8(__m256 __A) {
+ return _mm256_ipcvtt_roundps_epi8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvtt_roundps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ return _mm256_mask_ipcvtt_roundps_epi8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundps_epi8(__mmask8 __A, __m256 __B) {
+ return _mm256_maskz_ipcvtt_roundps_epi8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_ipcvtt_roundps_epu8(__m256 __A) {
+ return _mm256_ipcvtt_roundps_epu8(__A, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_mask_ipcvtt_roundps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ return _mm256_mask_ipcvtt_roundps_epu8(__S, __A, __B, 22); // expected-error {{invalid rounding argument}}
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundps_epu8(__mmask8 __A, __m256 __B) {
+ return _mm256_maskz_ipcvtt_roundps_epu8(__A, __B, 22); // expected-error {{invalid rounding argument}}
+}
diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins.c b/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins.c
new file mode 100755
index 0000000..bf6acaa
--- /dev/null
+++ b/clang/test/CodeGen/X86/avx10_2_512satcvt-builtins.c
@@ -0,0 +1,379 @@
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64 -target-feature +avx10.2-512 \
+// RUN: -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386 -target-feature +avx10.2-512 \
+// RUN: -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+__m512i test_mm512_ipcvtnebf16_epi8(__m512bh __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs512
+ return _mm512_ipcvtnebf16_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtnebf16_epi8(__m512i __S, __mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_mask_ipcvtnebf16_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtnebf16_epi8(__mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtnebf16_epi8
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs512
+ // CHECK: zeroinitializer
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_maskz_ipcvtnebf16_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtnebf16_epu8(__m512bh __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs512
+ return _mm512_ipcvtnebf16_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtnebf16_epu8(__m512i __S, __mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_mask_ipcvtnebf16_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtnebf16_epu8(__mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtnebf16_epu8
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs512
+ // CHECK: zeroinitializer
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_maskz_ipcvtnebf16_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtph_epi8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_ipcvtph_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_mask_ipcvtph_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtph_epi8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_maskz_ipcvtph_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvt_roundph_epi8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_ipcvt_roundph_epi8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvt_roundph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvt_roundph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_mask_ipcvt_roundph_epi8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvt_roundph_epi8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvt_roundph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs512
+ return _mm512_maskz_ipcvt_roundph_epi8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvtph_epu8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_ipcvtph_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_mask_ipcvtph_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtph_epu8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_maskz_ipcvtph_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvt_roundph_epu8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_ipcvt_roundph_epu8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvt_roundph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvt_roundph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_mask_ipcvt_roundph_epu8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvt_roundph_epu8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvt_roundph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs512
+ return _mm512_maskz_ipcvt_roundph_epu8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvtps_epi8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_ipcvtps_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_mask_ipcvtps_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtps_epi8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_maskz_ipcvtps_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvt_roundps_epi8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_ipcvt_roundps_epi8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvt_roundps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvt_roundps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_mask_ipcvt_roundps_epi8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvt_roundps_epi8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvt_roundps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs512
+ return _mm512_maskz_ipcvt_roundps_epi8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvtps_epu8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_ipcvtps_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvtps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_mask_ipcvtps_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvtps_epu8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_maskz_ipcvtps_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvt_roundps_epu8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_ipcvt_roundps_epu8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvt_roundps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvt_roundps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_mask_ipcvt_roundps_epu8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvt_roundps_epu8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvt_roundps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs512
+ return _mm512_maskz_ipcvt_roundps_epu8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvttnebf16_epi8(__m512bh __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs512(
+ return _mm512_ipcvttnebf16_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttnebf16_epi8(__m512i __S, __mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs512(
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_mask_ipcvttnebf16_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttnebf16_epi8(__mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttnebf16_epi8
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs512(
+ // CHECK: zeroinitializer
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_maskz_ipcvttnebf16_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvttnebf16_epu8(__m512bh __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs512(
+ return _mm512_ipcvttnebf16_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttnebf16_epu8(__m512i __S, __mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs512(
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_mask_ipcvttnebf16_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttnebf16_epu8(__mmask32 __A, __m512bh __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttnebf16_epu8
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs512(
+ // CHECK: zeroinitializer
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
+ return _mm512_maskz_ipcvttnebf16_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvttph_epi8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_ipcvttph_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_mask_ipcvttph_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttph_epi8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_maskz_ipcvttph_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtt_roundph_epi8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtt_roundph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_ipcvtt_roundph_epi8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvtt_roundph_epi8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtt_roundph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_mask_ipcvtt_roundph_epi8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundph_epi8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtt_roundph_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs512
+ return _mm512_maskz_ipcvtt_roundph_epi8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvttph_epu8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_ipcvttph_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_mask_ipcvttph_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttph_epu8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_maskz_ipcvttph_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtt_roundph_epu8(__m512h __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtt_roundph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_ipcvtt_roundph_epu8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvtt_roundph_epu8(__m512i __S, __mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtt_roundph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_mask_ipcvtt_roundph_epu8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundph_epu8(__mmask32 __A, __m512h __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtt_roundph_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs512
+ return _mm512_maskz_ipcvtt_roundph_epu8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvttps_epi8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_ipcvttps_epi8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_mask_ipcvttps_epi8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttps_epi8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_maskz_ipcvttps_epi8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtt_roundps_epi8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtt_roundps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_ipcvtt_roundps_epi8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvtt_roundps_epi8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtt_roundps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_mask_ipcvtt_roundps_epi8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+
+__m512i test_mm512_maskz_ipcvtt_roundps_epi8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtt_roundps_epi8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs512
+ return _mm512_maskz_ipcvtt_roundps_epi8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_ipcvttps_epu8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_ipcvttps_epu8(__A);
+}
+
+__m512i test_mm512_mask_ipcvttps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_mask_ipcvttps_epu8(__S, __A, __B);
+}
+
+__m512i test_mm512_maskz_ipcvttps_epu8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvttps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_maskz_ipcvttps_epu8(__A, __B);
+}
+
+__m512i test_mm512_ipcvtt_roundps_epu8(__m512 __A) {
+ // CHECK-LABEL: @test_mm512_ipcvtt_roundps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_ipcvtt_roundps_epu8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_mask_ipcvtt_roundps_epu8(__m512i __S, __mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_mask_ipcvtt_roundps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_mask_ipcvtt_roundps_epu8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m512i test_mm512_maskz_ipcvtt_roundps_epu8(__mmask16 __A, __m512 __B) {
+ // CHECK-LABEL: @test_mm512_maskz_ipcvtt_roundps_epu8
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs512
+ return _mm512_maskz_ipcvtt_roundps_epu8(__A, __B, _MM_FROUND_NO_EXC);
+}
diff --git a/clang/test/CodeGen/X86/avx10_2satcvt-builtins.c b/clang/test/CodeGen/X86/avx10_2satcvt-builtins.c
new file mode 100644
index 0000000..de9fbd4
--- /dev/null
+++ b/clang/test/CodeGen/X86/avx10_2satcvt-builtins.c
@@ -0,0 +1,603 @@
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64 -target-feature +avx10.2-256 \
+// RUN: -Wno-invalid-feature-combination -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386 -target-feature +avx10.2-256 \
+// RUN: -Wno-invalid-feature-combination -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+__m128i test_mm_ipcvtnebf16_epi8(__m128bh __A) {
+ // CHECK-LABEL: @test_mm_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs128
+ return _mm_ipcvtnebf16_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvtnebf16_epi8(__m128i __S, __mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_mask_ipcvtnebf16_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtnebf16_epi8(__mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs128
+ // CHECK: zeroinitializer
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_maskz_ipcvtnebf16_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtnebf16_epi8(__m256bh __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs256
+ return _mm256_ipcvtnebf16_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtnebf16_epi8(__m256i __S, __mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_mask_ipcvtnebf16_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtnebf16_epi8(__mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162ibs256
+ // CHECK: zeroinitializer
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_maskz_ipcvtnebf16_epi8(__A, __B);
+}
+
+__m128i test_mm_ipcvtnebf16_epu8(__m128bh __A) {
+ // CHECK-LABEL: @test_mm_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs128
+ return _mm_ipcvtnebf16_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvtnebf16_epu8(__m128i __S, __mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_mask_ipcvtnebf16_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtnebf16_epu8(__mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_maskz_ipcvtnebf16_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtnebf16_epu8(__m256bh __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs256
+ return _mm256_ipcvtnebf16_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtnebf16_epu8(__m256i __S, __mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_mask_ipcvtnebf16_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtnebf16_epu8(__mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvtnebf162iubs256
+ // CHECK: zeroinitializer
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_maskz_ipcvtnebf16_epu8(__A, __B);
+}
+
+__m128i test_mm_ipcvtph_epi8(__m128h __A) {
+ // CHECK-LABEL: @test_mm_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs128
+ return _mm_ipcvtph_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvtph_epi8(__m128i __S, __mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs128
+ return _mm_mask_ipcvtph_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtph_epi8(__mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs128
+ return _mm_maskz_ipcvtph_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtph_epi8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_ipcvtph_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_mask_ipcvtph_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtph_epi8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_maskz_ipcvtph_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvt_roundph_epi8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_ipcvt_roundph_epi8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvt_roundph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_mask_ipcvt_roundph_epi8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+__m256i test_mm256_maskz_ipcvt_roundph_epi8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2ibs256
+ return _mm256_maskz_ipcvt_roundph_epi8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvtph_epu8(__m128h __A) {
+ // CHECK-LABEL: @test_mm_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs128
+ return _mm_ipcvtph_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvtph_epu8(__m128i __S, __mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs128
+ return _mm_mask_ipcvtph_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtph_epu8(__mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs128
+ return _mm_maskz_ipcvtph_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtph_epu8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_ipcvtph_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_mask_ipcvtph_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtph_epu8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_maskz_ipcvtph_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvt_roundph_epu8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_ipcvt_roundph_epu8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvt_roundph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_mask_ipcvt_roundph_epu8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+__m256i test_mm256_maskz_ipcvt_roundph_epu8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtph2iubs256
+ return _mm256_maskz_ipcvt_roundph_epu8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvtps_epi8(__m128 __A) {
+ // CHECK-LABEL: @test_mm_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs128
+ return _mm_ipcvtps_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvtps_epi8(__m128i __S, __mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs128
+ return _mm_mask_ipcvtps_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtps_epi8(__mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs128
+ return _mm_maskz_ipcvtps_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtps_epi8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_ipcvtps_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_mask_ipcvtps_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtps_epi8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_maskz_ipcvtps_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvt_roundps_epi8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_ipcvt_roundps_epi8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvt_roundps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_mask_ipcvt_roundps_epi8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvt_roundps_epi8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2ibs256
+ return _mm256_maskz_ipcvt_roundps_epi8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvtps_epu8(__m128 __A) {
+ // CHECK-LABEL: @test_mm_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs128
+ return _mm_ipcvtps_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvtps_epu8(__m128i __S, __mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs128
+ return _mm_mask_ipcvtps_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvtps_epu8(__mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs128
+ return _mm_maskz_ipcvtps_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtps_epu8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_ipcvtps_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvtps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_mask_ipcvtps_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvtps_epu8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_maskz_ipcvtps_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvt_roundps_epu8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_ipcvt_roundps_epu8(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvt_roundps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_mask_ipcvt_roundps_epu8(__S, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvt_roundps_epu8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvtps2iubs256
+ return _mm256_maskz_ipcvt_roundps_epu8(__A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvttnebf16_epi8(__m128bh __A) {
+ // CHECK-LABEL: @test_mm_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs128
+ return _mm_ipcvttnebf16_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvttnebf16_epi8(__m128i __S, __mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_mask_ipcvttnebf16_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttnebf16_epi8(__mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs128
+ // CHECK: zeroinitializer
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_maskz_ipcvttnebf16_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttnebf16_epi8(__m256bh __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs256
+ return _mm256_ipcvttnebf16_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttnebf16_epi8(__m256i __S, __mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_mask_ipcvttnebf16_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttnebf16_epi8(__mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttnebf16_epi8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162ibs256
+ // CHECK: zeroinitializer
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_maskz_ipcvttnebf16_epi8(__A, __B);
+}
+
+__m128i test_mm_ipcvttnebf16_epu8(__m128bh __A) {
+ // CHECK-LABEL: @test_mm_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs128
+ return _mm_ipcvttnebf16_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvttnebf16_epu8(__m128i __S, __mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_mask_ipcvttnebf16_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttnebf16_epu8(__mmask8 __A, __m128bh __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs128
+ // CHECK: zeroinitializer
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ return _mm_maskz_ipcvttnebf16_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttnebf16_epu8(__m256bh __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs256
+ return _mm256_ipcvttnebf16_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttnebf16_epu8(__m256i __S, __mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_mask_ipcvttnebf16_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttnebf16_epu8(__mmask16 __A, __m256bh __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttnebf16_epu8(
+ // CHECK: @llvm.x86.avx10.vcvttnebf162iubs256
+ // CHECK: zeroinitializer
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ return _mm256_maskz_ipcvttnebf16_epu8(__A, __B);
+}
+
+__m128i test_mm_ipcvttph_epi8(__m128h __A) {
+ // CHECK-LABEL: @test_mm_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs128
+ return _mm_ipcvttph_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvttph_epi8(__m128i __S, __mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs128
+ return _mm_mask_ipcvttph_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttph_epi8(__mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs128
+ return _mm_maskz_ipcvttph_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttph_epi8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_ipcvttph_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_mask_ipcvttph_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttph_epi8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_maskz_ipcvttph_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtt_roundph_epi8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_ipcvtt_roundph_epi8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvtt_roundph_epi8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_mask_ipcvtt_roundph_epi8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundph_epi8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtt_roundph_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2ibs256
+ return _mm256_maskz_ipcvtt_roundph_epi8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvttph_epu8(__m128h __A) {
+ // CHECK-LABEL: @test_mm_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs128
+ return _mm_ipcvttph_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvttph_epu8(__m128i __S, __mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs128
+ return _mm_mask_ipcvttph_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttph_epu8(__mmask8 __A, __m128h __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs128
+ return _mm_maskz_ipcvttph_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttph_epu8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_ipcvttph_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_mask_ipcvttph_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttph_epu8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_maskz_ipcvttph_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtt_roundph_epu8(__m256h __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_ipcvtt_roundph_epu8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvtt_roundph_epu8(__m256i __S, __mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_mask_ipcvtt_roundph_epu8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundph_epu8(__mmask16 __A, __m256h __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtt_roundph_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttph2iubs256
+ return _mm256_maskz_ipcvtt_roundph_epu8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvttps_epi8(__m128 __A) {
+ // CHECK-LABEL: @test_mm_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs128
+ return _mm_ipcvttps_epi8(__A);
+}
+
+__m128i test_mm_mask_ipcvttps_epi8(__m128i __S, __mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs128
+ return _mm_mask_ipcvttps_epi8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttps_epi8(__mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs128
+ return _mm_maskz_ipcvttps_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttps_epi8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_ipcvttps_epi8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_mask_ipcvttps_epi8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttps_epi8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_maskz_ipcvttps_epi8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtt_roundps_epi8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_ipcvtt_roundps_epi8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvtt_roundps_epi8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_mask_ipcvtt_roundps_epi8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundps_epi8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtt_roundps_epi8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2ibs256
+ return _mm256_maskz_ipcvtt_roundps_epi8(__A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m128i test_mm_ipcvttps_epu8(__m128 __A) {
+ // CHECK-LABEL: @test_mm_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs128
+ return _mm_ipcvttps_epu8(__A);
+}
+
+__m128i test_mm_mask_ipcvttps_epu8(__m128i __S, __mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_mask_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs128
+ return _mm_mask_ipcvttps_epu8(__S, __A, __B);
+}
+
+__m128i test_mm_maskz_ipcvttps_epu8(__mmask8 __A, __m128 __B) {
+ // CHECK-LABEL: @test_mm_maskz_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs128
+ return _mm_maskz_ipcvttps_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvttps_epu8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_ipcvttps_epu8(__A);
+}
+
+__m256i test_mm256_mask_ipcvttps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_mask_ipcvttps_epu8(__S, __A, __B);
+}
+
+__m256i test_mm256_maskz_ipcvttps_epu8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvttps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_maskz_ipcvttps_epu8(__A, __B);
+}
+
+__m256i test_mm256_ipcvtt_roundps_epu8(__m256 __A) {
+ // CHECK-LABEL: @test_mm256_ipcvtt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_ipcvtt_roundps_epu8(__A, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_mask_ipcvtt_roundps_epu8(__m256i __S, __mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_mask_ipcvtt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_mask_ipcvtt_roundps_epu8(__S, __A, __B, _MM_FROUND_NO_EXC);
+}
+
+__m256i test_mm256_maskz_ipcvtt_roundps_epu8(__mmask8 __A, __m256 __B) {
+ // CHECK-LABEL: @test_mm256_maskz_ipcvtt_roundps_epu8(
+ // CHECK: @llvm.x86.avx10.mask.vcvttps2iubs256
+ return _mm256_maskz_ipcvtt_roundps_epu8(__A, __B, _MM_FROUND_NO_EXC);
+}
diff --git a/clang/test/CodeGen/aarch64-elf-pauthabi.c b/clang/test/CodeGen/aarch64-elf-pauthabi.c
index aa83ee3..d60dcee 100644
--- a/clang/test/CodeGen/aarch64-elf-pauthabi.c
+++ b/clang/test/CodeGen/aarch64-elf-pauthabi.c
@@ -5,7 +5,8 @@
// RUN: -fptrauth-auth-traps \
// RUN: -fptrauth-vtable-pointer-address-discrimination \
// RUN: -fptrauth-vtable-pointer-type-discrimination \
-// RUN: -fptrauth-init-fini %s | \
+// RUN: -fptrauth-init-fini \
+// RUN: -fptrauth-init-fini-address-discrimination %s | \
// RUN: FileCheck %s --check-prefix=ALL
// RUN: %clang_cc1 -triple aarch64-linux -emit-llvm -o - \
@@ -32,8 +33,12 @@
// RUN: -fptrauth-calls -fptrauth-init-fini %s | \
// RUN: FileCheck %s --check-prefix=INITFINI
+// RUN: %clang_cc1 -triple aarch64-linux -emit-llvm -o - \
+// RUN: -fptrauth-calls -fptrauth-init-fini -fptrauth-init-fini-address-discrimination %s | \
+// RUN: FileCheck %s --check-prefix=INITFINIADDR
+
// ALL: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
-// ALL: !{i32 1, !"aarch64-elf-pauthabi-version", i32 127}
+// ALL: !{i32 1, !"aarch64-elf-pauthabi-version", i32 255}
// INTRIN: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
// INTRIN: !{i32 1, !"aarch64-elf-pauthabi-version", i32 1}
@@ -56,4 +61,7 @@
// INITFINI: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
// INITFINI: !{i32 1, !"aarch64-elf-pauthabi-version", i32 66}
+// INITFINIADDR: !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+// INITFINIADDR: !{i32 1, !"aarch64-elf-pauthabi-version", i32 194}
+
void foo() {}
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_clamp.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_clamp.c
index 30d963d..972a658 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_clamp.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_clamp.c
@@ -1,14 +1,14 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 \
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 \
// RUN: -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 \
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 \
// RUN: -Werror -emit-llvm -disable-O0-optnone -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 \
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 \
// RUN: -Werror -emit-llvm -disable-O0-optnone -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 \
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 \
// RUN: -Werror -emit-llvm -disable-O0-optnone -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 \
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 \
// RUN: -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme.h>
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_max.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_max.c
index cc084f7..bd8d57e 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_max.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_max.c
@@ -1,9 +1,9 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// REQUIRES: aarch64-registered-target
#include <arm_sme.h>
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_maxnm.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_maxnm.c
index f48c885..0765993 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_maxnm.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_maxnm.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_min.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_min.c
index df93860..fe7b74c 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_min.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_min.c
@@ -1,9 +1,9 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// REQUIRES: aarch64-registered-target
#include <arm_sme.h>
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_minnm.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_minnm.c
index 65d440d..3b221c0 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_minnm.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_minnm.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfadd.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfadd.c
index 452b8fc..0f3b92f 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfadd.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfadd.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfclamp.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfclamp.c
index 57f025f..0955994 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfclamp.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfclamp.c
@@ -1,10 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmax.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmax.c
index 2518ace..cc3207a 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmax.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmax.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmaxnm.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmaxnm.c
index ccfc638..7983943 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmaxnm.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmaxnm.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmin.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmin.c
index ebf5c24..97159f1 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmin.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmin.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfminnm.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfminnm.c
index d5869fc..4cadbdc 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfminnm.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfminnm.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla.c
index bd0dcf1..720853f 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla_lane.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla_lane.c
index d4cfceb5..8d13d83 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla_lane.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmla_lane.c
@@ -1,10 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls.c
index 82d8134..98c4dc9 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls_lane.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls_lane.c
index 72ace1b..4adc388 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls_lane.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmls_lane.c
@@ -1,10 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul.c
index 655bd90..e9443e3 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul_lane.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul_lane.c
index 2032881..adcd4b8 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul_lane.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfmul_lane.c
@@ -1,10 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +b16b16 -target-feature +sve -target-feature -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2 -target-feature +sve2p1 -target-feature +sve-b16b16 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfsub.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfsub.c
index 9eae736..a58f004 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfsub.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_bfsub.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve-b16b16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
index edda7b7..8608c5b 100644
--- a/clang/test/CodeGen/bitfield-access-pad.c
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -16,7 +16,6 @@
// Configs that have expensive unaligned access
// Little Endian
// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
-// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
// Big endian
// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
diff --git a/clang/test/CodeGen/bitfield-access-unit.c b/clang/test/CodeGen/bitfield-access-unit.c
index d0553c5..c1b0a43 100644
--- a/clang/test/CodeGen/bitfield-access-unit.c
+++ b/clang/test/CodeGen/bitfield-access-unit.c
@@ -53,8 +53,8 @@
// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
-// Both le64-elf and m68-elf are strict alignment ISAs with 4-byte aligned
-// 64-bit or 2-byte aligned 32-bit integer types. This more compex to describe here.
+// m68k-elf is a strict alignment ISA with 4-byte aligned 64-bit or 2-byte
+// aligned 32-bit integer types. This is more complex to describe here.
// If unaligned access is expensive don't stick these together.
struct A {
diff --git a/clang/test/CodeGen/ptrauth-init-fini.c b/clang/test/CodeGen/ptrauth-init-fini.c
new file mode 100644
index 0000000..d51ed1d
--- /dev/null
+++ b/clang/test/CodeGen/ptrauth-init-fini.c
@@ -0,0 +1,39 @@
+// REQUIRES: aarch64-registered-target
+
+// RUN: %clang -target aarch64-elf -march=armv8.3-a+pauth -fptrauth-calls -fptrauth-init-fini \
+// RUN: -S -emit-llvm %s -o - | FileCheck --check-prefix=SIGNED %s
+
+// RUN: %clang -target aarch64-elf -march=armv8.3-a+pauth -fptrauth-calls -fptrauth-init-fini \
+// RUN: -fptrauth-init-fini-address-discrimination -S -emit-llvm %s -o - | FileCheck --check-prefix=ADDRDISC %s
+
+// RUN: %clang -target aarch64-elf -march=armv8.3-a+pauth -fptrauth-calls -fno-ptrauth-init-fini \
+// RUN: -S -emit-llvm %s -o - | FileCheck --check-prefix=UNSIGNED %s
+
+// RUN: %clang -target aarch64-elf -march=armv8.3-a+pauth -fptrauth-calls -fptrauth-init-fini-address-discrimination \
+// RUN: -S -emit-llvm %s -o - | FileCheck --check-prefix=UNSIGNED %s
+
+// RUN: %clang -target aarch64-elf -march=armv8.3-a+pauth -fptrauth-init-fini \
+// RUN: -S -emit-llvm %s -o - | FileCheck --check-prefix=UNSIGNED %s
+
+// SIGNED: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @foo, i32 0, i64 55764), ptr null }]
+// SIGNED: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @bar, i32 0, i64 55764), ptr null }]
+
+// ADDRDISC: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @foo, i32 0, i64 55764, ptr inttoptr (i64 1 to ptr)), ptr null }]
+// ADDRDISC: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @bar, i32 0, i64 55764, ptr inttoptr (i64 1 to ptr)), ptr null }]
+
+// UNSIGNED: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @foo, ptr null }]
+// UNSIGNED: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @bar, ptr null }]
+
+volatile int x = 0;
+
+__attribute__((constructor)) void foo(void) {
+ x = 42;
+}
+
+__attribute__((destructor)) void bar(void) {
+ x = 24;
+}
+
+int main() {
+ return x;
+}
diff --git a/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp b/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp
index 31eaf3f..75fd72a 100644
--- a/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp
+++ b/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp
@@ -11,7 +11,7 @@
///
/// Now vtables should have just two components.
// POINTER: @_ZTV1A = unnamed_addr constant { [2 x ptr] } { [2 x ptr] [ptr null, ptr @_ZN1A3fooEv] }, align 8
-// RELATIVE: @_ZTV1A.local = private unnamed_addr constant { [2 x i32] } { [2 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [2 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 1) to i64)) to i32)] }, align 4
+// RELATIVE: @_ZTV1A.local = internal unnamed_addr constant { [2 x i32] } { [2 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [2 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 1) to i64)) to i32)] }, align 4
// RELATIVE: @_ZTV1A = unnamed_addr alias { [2 x i32] }, ptr @_ZTV1A.local
/// None of these supplementary symbols should be emitted with -fno-rtti, but
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
index 62b09c8..bb86d45 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
@@ -13,7 +13,7 @@
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// The VTable for B is emitted here since it has a key function which is defined in this module
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// The VTable for A is emitted here and in a comdat section since it has no key function, and is used in this module when creating an instance of A (in func()).
// CHECK: @_ZTV1A.local = linkonce_odr hidden unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, comdat($_ZTV1A), align 4
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-1.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-1.cpp
index 660897f..89a94d4 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-1.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-1.cpp
@@ -7,7 +7,7 @@
// CHECK: $_ZTI1A.rtti_proxy = comdat any
-// CHECK: @_ZTV1A.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1A.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// @_ZTV1A ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1A.local
void A::foo() {}
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-2.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-2.cpp
index 0c3cf94..af925c5 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-2.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/cross-translation-unit-2.cpp
@@ -7,7 +7,7 @@
// CHECK: $_ZTI1B.rtti_proxy = comdat any
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1B.local
// A::bar() is defined outside of the module that defines the vtable for A
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-inheritance.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-inheritance.cpp
index 96bbd19..4c940aa 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-inheritance.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-inheritance.cpp
@@ -4,16 +4,16 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -O1 -o - -emit-llvm -fhalf-no-semantic-interposition | FileCheck %s
// VTable for B should contain offset to top (0), RTTI pointer, A::foo(), and B::barB().
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// VTable for C should contain offset to top (0), RTTI pointer, A::foo(), and C::barC().
-// CHECK: @_ZTV1C.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1C.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// VTable for D should be similar to the mutiple inheritance example where this
// vtable contains 2 inner vtables:
// - 1st table containing D::foo(), B::barB(), and D::baz().
// - 2nd table containing a thunk to D::foo() and C::barC().
-// CHECK: @_ZTV1D.local = private unnamed_addr constant { [5 x i32], [4 x i32] } { [5 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3bazEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32)], [4 x i32] [i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZThn8_N1D3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1D.local = internal unnamed_addr constant { [5 x i32], [4 x i32] } { [5 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3bazEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 2) to i64)) to i32)], [4 x i32] [i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZThn8_N1D3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 2) to i64)) to i32)] }, align 4
// @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1B.local
// @_ZTV1C ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1C.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-virtual-inheritance.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-virtual-inheritance.cpp
index 75a3e21..2502c18 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-virtual-inheritance.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/diamond-virtual-inheritance.cpp
@@ -6,28 +6,28 @@
// Class A contains a vtable ptr, then int, then padding
// VTable for B. Contains an extra field at the start for the virtual-base offset.
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
// VTT for B
// CHECK: @_ZTT1B ={{.*}} unnamed_addr constant [2 x ptr] [ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 1, i32 3)], align 8
// VTable for C
-// CHECK: @_ZTV1C.local = private unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1C.local = internal unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
// VTT for C
// CHECK: @_ZTT1C ={{.*}} unnamed_addr constant [2 x ptr] [ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 3)], align 8
// VTable for D
-// CHECK: @_ZTV1D.local = private unnamed_addr constant { [5 x i32], [4 x i32], [4 x i32] } { [5 x i32] [i32 16, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3bazEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 8, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -16, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 2, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 2, i32 3) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1D.local = internal unnamed_addr constant { [5 x i32], [4 x i32], [4 x i32] } { [5 x i32] [i32 16, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1D3bazEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 8, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -16, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1D.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 2, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 2, i32 3) to i64)) to i32)] }, align 4
// VTT for D
// CHECK: @_ZTT1D ={{.*}} unnamed_addr constant [7 x ptr] [ptr getelementptr inbounds inrange(-12, 8) ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 0, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 0, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 1, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 0, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 1, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 2, i32 3), ptr getelementptr inbounds inrange(-12, 4) ({ [5 x i32], [4 x i32], [4 x i32] }, ptr @_ZTV1D.local, i32 0, i32 1, i32 3)], align 8
// Construction vtable for B-in-D
-// CHECK: @_ZTC1D0_1B.local = private unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 16, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -16, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
+// CHECK: @_ZTC1D0_1B.local = internal unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 16, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B4barBEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -16, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D0_1B.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
// Construction vtable for C-in-D
-// CHECK: @_ZTC1D8_1C.local = private unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
+// CHECK: @_ZTC1D8_1C.local = internal unnamed_addr constant { [4 x i32], [4 x i32] } { [4 x i32] [i32 8, i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 0, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C4barCEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 0, i32 3) to i64)) to i32)], [4 x i32] [i32 0, i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 1, i32 3) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [4 x i32] }, ptr @_ZTC1D8_1C.local, i32 0, i32 1, i32 3) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32], [4 x i32] }, ptr @_ZTV1B.local
// CHECK: @_ZTV1C ={{.*}} unnamed_addr alias { [4 x i32], [4 x i32] }, ptr @_ZTV1C.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/inheritted-virtual-function.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/inheritted-virtual-function.cpp
index 4cd657c..21e22c3 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/inheritted-virtual-function.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/inheritted-virtual-function.cpp
@@ -9,7 +9,7 @@ public:
};
// The VTable for B should look similar to the vtable for A but the component for foo() should point to A::foo() and the component for bar() should point to B::bar().
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1B.local
class B : public A {
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/inline-virtual-function.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/inline-virtual-function.cpp
index 332ba8d..6a9e0c8 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/inline-virtual-function.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/inline-virtual-function.cpp
@@ -7,7 +7,7 @@
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// The vtable has a key function (A::foo()) so it does not have a comdat
-// CHECK: @_ZTV1A.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1A.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1A ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1A.local
class A {
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/multiple-inheritance.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/multiple-inheritance.cpp
index 0b90000..71f196b 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/multiple-inheritance.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/multiple-inheritance.cpp
@@ -5,7 +5,7 @@
// VTable for C contains 2 sub-vtables (represented as 2 structs). The first contains the components for B and the second contains the components for C. The RTTI ptr in both arrays still point to the RTTI struct for C.
// The component for bar() instead points to a thunk which redirects to C::bar() which overrides B::bar().
// Now that we have a class with 2 parents, the offset to top in the second array is non-zero.
-// CHECK: @_ZTV1C.local = private unnamed_addr constant { [4 x i32], [3 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32)], [3 x i32] [i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZThn8_N1C3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1C.local = internal unnamed_addr constant { [4 x i32], [3 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1C3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 0, i32 2) to i64)) to i32)], [3 x i32] [i32 -8, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1C.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZThn8_N1C3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32], [3 x i32] }, ptr @_ZTV1C.local, i32 0, i32 1, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1C ={{.*}} unnamed_addr alias { [4 x i32], [3 x i32] }, ptr @_ZTV1C.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/no-alias-when-dso-local.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/no-alias-when-dso-local.cpp
index 7213513..0fcb0a4 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/no-alias-when-dso-local.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/no-alias-when-dso-local.cpp
@@ -4,7 +4,7 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -o - -emit-llvm -fhalf-no-semantic-interposition | FileCheck %s --check-prefix=DEFAULT-VIS
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -o - -emit-llvm -fvisibility=hidden | FileCheck %s --check-prefix=HIDDEN-VIS
-// DEFAULT-VIS: @_ZTV1A.local = private unnamed_addr constant
+// DEFAULT-VIS: @_ZTV1A.local = internal unnamed_addr constant
// DEFAULT-VIS: @_ZTV1A ={{.*}} unnamed_addr alias { [3 x i32] }, ptr @_ZTV1A.local
// HIDDEN-VIS-NOT: @_ZTV1A.local
// HIDDEN-VIS: @_ZTV1A = hidden unnamed_addr constant
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/override-pure-virtual-method.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/override-pure-virtual-method.cpp
index 6fed01e..7009153 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/override-pure-virtual-method.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/override-pure-virtual-method.cpp
@@ -4,9 +4,9 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -O1 -o - -emit-llvm -fhalf-no-semantic-interposition | FileCheck %s
-// CHECK: @_ZTV1A.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1A.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1A ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1A.local
// CHECK: @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1B.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/overriden-virtual-function.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/overriden-virtual-function.cpp
index e53135a..dfa68ee 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/overriden-virtual-function.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/overriden-virtual-function.cpp
@@ -3,7 +3,7 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -O1 -o - -emit-llvm -fhalf-no-semantic-interposition | FileCheck %s
-// CHECK: @_ZTV1B.local = private unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1B.local = internal unnamed_addr constant { [4 x i32] } { [4 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1B.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1B3barEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [4 x i32] }, ptr @_ZTV1B.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTV1B ={{.*}} unnamed_addr alias { [4 x i32] }, ptr @_ZTV1B.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-flag.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-flag.cpp
index a7054cb..742879d 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-flag.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-flag.cpp
@@ -7,7 +7,7 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -o - -emit-llvm -fno-experimental-relative-c++-abi-vtables | FileCheck --check-prefix=DEFAULT-ABI %s
// VTable contains offsets and references to the hidden symbols
-// RELATIVE-ABI: @_ZTV1A.local = private unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// RELATIVE-ABI: @_ZTV1A.local = internal unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// RELATIVE-ABI: @_ZTV1A ={{.*}} unnamed_addr alias { [3 x i32] }, ptr @_ZTV1A.local
// DEFAULT-ABI: @_ZTV1A ={{.*}} unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A3fooEv] }, align 8
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-hwasan.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-hwasan.cpp
index 6b459a1..a872741 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-hwasan.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/relative-vtables-hwasan.cpp
@@ -4,7 +4,7 @@
/// vtable is hidden and the alias is made public. With hwasan enabled, we want
/// to ensure the local one self-referenced in the hidden vtable is not
/// hwasan-instrumented.
-// CHECK-DAG: @_ZTV1A.local = private unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, no_sanitize_hwaddress, align 4
+// CHECK-DAG: @_ZTV1A.local = internal unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, no_sanitize_hwaddress, align 4
// CHECK-DAG: @_ZTV1A = unnamed_addr alias { [3 x i32] }, ptr @_ZTV1A.local
// CHECK-DAG: @_ZTI1A.rtti_proxy = linkonce_odr hidden unnamed_addr constant ptr @_ZTI1A, no_sanitize_hwaddress, comdat
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
index 0e88015..ad8018e 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
@@ -8,7 +8,7 @@
// VTable contains offsets and references to the hidden symbols
// The vtable definition itself is private so we can take relative references to
// it. The vtable symbol will be exposed through a public alias.
-// CHECK: @_ZTV1A.local = private unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTV1A.local = internal unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
// CHECK: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, align 8
diff --git a/clang/test/CodeGenCXX/bitfield-access-empty.cpp b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
index 4922ed1..460fe6e 100644
--- a/clang/test/CodeGenCXX/bitfield-access-empty.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
@@ -26,7 +26,6 @@
// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
-// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
diff --git a/clang/test/CodeGenCXX/bitfield-access-tail.cpp b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
index 1539e17..fb961f3 100644
--- a/clang/test/CodeGenCXX/bitfield-access-tail.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
@@ -26,7 +26,6 @@
// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT64 %s
// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT32 %s
// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT32 %s
-// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT64 %s
// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT32 %s
// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT32 %s
// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT32 %s
diff --git a/clang/test/CodeGenCXX/temporaries.cpp b/clang/test/CodeGenCXX/temporaries.cpp
index 9f697bd..0990c80 100644
--- a/clang/test/CodeGenCXX/temporaries.cpp
+++ b/clang/test/CodeGenCXX/temporaries.cpp
@@ -64,6 +64,26 @@ namespace RefTempSubobject {
constexpr const SelfReferential &sr = SelfReferential();
}
+namespace Vector {
+ typedef __attribute__((vector_size(16))) int vi4a;
+ typedef __attribute__((ext_vector_type(4))) int vi4b;
+ struct S {
+ vi4a v;
+ vi4b w;
+ };
+
+ int &&r = S().v[1];
+ // CHECK: @_ZGRN6Vector1rE_ = internal global i32 0, align 4
+ // CHECK: @_ZN6Vector1rE = constant ptr @_ZGRN6Vector1rE_, align 8
+
+ int &&s = S().w[1];
+ // CHECK: @_ZGRN6Vector1sE_ = internal global i32 0, align 4
+ // CHECK: @_ZN6Vector1sE = constant ptr @_ZGRN6Vector1sE_, align 8
+
+ int &&t = S().w.y;
+ // CHECK: @_ZGRN6Vector1tE_ = internal global i32 0, align 4
+ // CHECK: @_ZN6Vector1tE = constant ptr @_ZGRN6Vector1tE_, align 8
+}
struct A {
A();
~A();
@@ -665,27 +685,6 @@ namespace Bitfield {
int &&r = S().a;
}
-namespace Vector {
- typedef __attribute__((vector_size(16))) int vi4a;
- typedef __attribute__((ext_vector_type(4))) int vi4b;
- struct S {
- vi4a v;
- vi4b w;
- };
- // CHECK: alloca
- // CHECK: extractelement
- // CHECK: store i32 {{.*}}, ptr @_ZGRN6Vector1rE_
- // CHECK: store ptr @_ZGRN6Vector1rE_, ptr @_ZN6Vector1rE,
- int &&r = S().v[1];
-
- // CHECK: alloca
- // CHECK: extractelement
- // CHECK: store i32 {{.*}}, ptr @_ZGRN6Vector1sE_
- // CHECK: store ptr @_ZGRN6Vector1sE_, ptr @_ZN6Vector1sE,
- int &&s = S().w[1];
- int &&ss = S().w.y;
-}
-
namespace ImplicitTemporaryCleanup {
struct A { A(int); ~A(); };
void g();
diff --git a/clang/test/Driver/Inputs/DriverKit23.0.sdk/SDKSettings.json b/clang/test/Driver/Inputs/DriverKit23.0.sdk/SDKSettings.json
new file mode 100644
index 0000000..7ba6c24
--- /dev/null
+++ b/clang/test/Driver/Inputs/DriverKit23.0.sdk/SDKSettings.json
@@ -0,0 +1 @@
+{"Version":"23.0", "MaximumDeploymentTarget": "23.0.99"}
diff --git a/clang/test/Driver/Inputs/MacOSX99.0.sdk/SDKSettings.json b/clang/test/Driver/Inputs/MacOSX15.0.sdk/SDKSettings.json
index 77b70e1..77b70e1 100644
--- a/clang/test/Driver/Inputs/MacOSX99.0.sdk/SDKSettings.json
+++ b/clang/test/Driver/Inputs/MacOSX15.0.sdk/SDKSettings.json
diff --git a/clang/test/Driver/aarch64-ptrauth.c b/clang/test/Driver/aarch64-ptrauth.c
index 75190c4..19eaa73 100644
--- a/clang/test/Driver/aarch64-ptrauth.c
+++ b/clang/test/Driver/aarch64-ptrauth.c
@@ -12,10 +12,11 @@
// RUN: -fno-ptrauth-vtable-pointer-address-discrimination -fptrauth-vtable-pointer-address-discrimination \
// RUN: -fno-ptrauth-vtable-pointer-type-discrimination -fptrauth-vtable-pointer-type-discrimination \
// RUN: -fno-ptrauth-type-info-vtable-pointer-discrimination -fptrauth-type-info-vtable-pointer-discrimination \
-// RUN: -fno-ptrauth-init-fini -fptrauth-init-fini \
// RUN: -fno-ptrauth-indirect-gotos -fptrauth-indirect-gotos \
+// RUN: -fno-ptrauth-init-fini -fptrauth-init-fini \
+// RUN: -fno-ptrauth-init-fini-address-discrimination -fptrauth-init-fini-address-discrimination \
// RUN: %s 2>&1 | FileCheck %s --check-prefix=ALL
-// ALL: "-cc1"{{.*}} "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-type-info-vtable-pointer-discrimination" "-fptrauth-init-fini" "-fptrauth-indirect-gotos"
+// ALL: "-cc1"{{.*}} "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-returns" "-fptrauth-auth-traps" "-fptrauth-vtable-pointer-address-discrimination" "-fptrauth-vtable-pointer-type-discrimination" "-fptrauth-type-info-vtable-pointer-discrimination" "-fptrauth-indirect-gotos" "-fptrauth-init-fini" "-fptrauth-init-fini-address-discrimination"
// RUN: %clang -### -c --target=aarch64-linux -mabi=pauthtest %s 2>&1 | FileCheck %s --check-prefix=PAUTHABI1
// RUN: %clang -### -c --target=aarch64-linux-pauthtest %s 2>&1 | FileCheck %s --check-prefix=PAUTHABI1
@@ -36,8 +37,8 @@
// RUN: not %clang -### -c --target=x86_64 -fptrauth-intrinsics -fptrauth-calls -fptrauth-returns -fptrauth-auth-traps \
// RUN: -fptrauth-vtable-pointer-address-discrimination -fptrauth-vtable-pointer-type-discrimination \
-// RUN: -fptrauth-type-info-vtable-pointer-discrimination -fptrauth-indirect-gotos -fptrauth-init-fini %s 2>&1 | \
-// RUN: FileCheck %s --check-prefix=ERR1
+// RUN: -fptrauth-type-info-vtable-pointer-discrimination -fptrauth-indirect-gotos -fptrauth-init-fini \
+// RUN: -fptrauth-init-fini-address-discrimination %s 2>&1 | FileCheck %s --check-prefix=ERR1
// ERR1: error: unsupported option '-fptrauth-intrinsics' for target '{{.*}}'
// ERR1-NEXT: error: unsupported option '-fptrauth-calls' for target '{{.*}}'
// ERR1-NEXT: error: unsupported option '-fptrauth-returns' for target '{{.*}}'
@@ -47,6 +48,7 @@
// ERR1-NEXT: error: unsupported option '-fptrauth-type-info-vtable-pointer-discrimination' for target '{{.*}}'
// ERR1-NEXT: error: unsupported option '-fptrauth-indirect-gotos' for target '{{.*}}'
// ERR1-NEXT: error: unsupported option '-fptrauth-init-fini' for target '{{.*}}'
+// ERR1-NEXT: error: unsupported option '-fptrauth-init-fini-address-discrimination' for target '{{.*}}'
//// Only support PAuth ABI for Linux as for now.
// RUN: not %clang -o /dev/null -c --target=aarch64-unknown -mabi=pauthtest %s 2>&1 | FileCheck %s --check-prefix=ERR2
diff --git a/clang/test/Driver/cl-cxx20-modules.cppm b/clang/test/Driver/cl-cxx20-modules.cppm
new file mode 100644
index 0000000..06df929
--- /dev/null
+++ b/clang/test/Driver/cl-cxx20-modules.cppm
@@ -0,0 +1,8 @@
+// RUN: %clang_cl /std:c++20 --precompile -### -- %s 2>&1 | FileCheck --check-prefix=PRECOMPILE %s
+// PRECOMPILE: -emit-module-interface
+
+// RUN: %clang_cl /std:c++20 --fmodule-file=Foo=Foo.pcm -### -- %s 2>&1 | FileCheck --check-prefix=FMODULEFILE %s
+// FMODULEFILE: -fmodule-file=Foo=Foo.pcm
+
+// RUN: %clang_cl /std:c++20 --fprebuilt-module-path=. -### -- %s 2>&1 | FileCheck --check-prefix=FPREBUILT %s
+// FPREBUILT: -fprebuilt-module-path=.
diff --git a/clang/test/Driver/darwin-builtin-modules.c b/clang/test/Driver/darwin-builtin-modules.c
index 1c56e13..ec51513 100644
--- a/clang/test/Driver/darwin-builtin-modules.c
+++ b/clang/test/Driver/darwin-builtin-modules.c
@@ -6,6 +6,7 @@
// RUN: %clang -isysroot %S/Inputs/iPhoneOS13.0.sdk -target arm64-apple-ios13.0 -### %s 2>&1 | FileCheck %s
// CHECK: -fbuiltin-headers-in-system-modules
-// RUN: %clang -isysroot %S/Inputs/MacOSX99.0.sdk -target x86_64-apple-macos98.0 -### %s 2>&1 | FileCheck --check-prefix=CHECK_FUTURE %s
-// RUN: %clang -isysroot %S/Inputs/MacOSX99.0.sdk -target x86_64-apple-macos99.0 -### %s 2>&1 | FileCheck --check-prefix=CHECK_FUTURE %s
+// RUN: %clang -isysroot %S/Inputs/MacOSX15.0.sdk -target x86_64-apple-macos14.0 -### %s 2>&1 | FileCheck --check-prefix=CHECK_FUTURE %s
+// RUN: %clang -isysroot %S/Inputs/MacOSX15.0.sdk -target x86_64-apple-macos15.0 -### %s 2>&1 | FileCheck --check-prefix=CHECK_FUTURE %s
+// RUN: %clang -isysroot %S/Inputs/DriverKit23.0.sdk -target arm64-apple-driverkit23.0 -### %s 2>&1 | FileCheck --check-prefix=CHECK_FUTURE %s
// CHECK_FUTURE-NOT: -fbuiltin-headers-in-system-modules
diff --git a/clang/test/Driver/fsanitize.c b/clang/test/Driver/fsanitize.c
index db14f6e..678fa43 100644
--- a/clang/test/Driver/fsanitize.c
+++ b/clang/test/Driver/fsanitize.c
@@ -611,6 +611,8 @@
// RUN: %clang --target=arm-linux-gnu -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
// RUN: %clang --target=aarch64-linux-gnu -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
// RUN: %clang --target=arm-linux-android -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang --target=arm-none-eabi -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang --target=thumb-none-eabi -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
// RUN: %clang --target=aarch64-linux-android -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
// RUN: %clang --target=aarch64_be -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
// RUN: %clang --target=riscv32 -fvisibility=hidden -fsanitize=cfi -flto -resource-dir=%S/Inputs/resource_dir -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CFI
diff --git a/clang/test/Driver/ftime-trace-sections.py b/clang/test/Driver/ftime-trace-sections.py
index b332931..02afa4a 100755..100644
--- a/clang/test/Driver/ftime-trace-sections.py
+++ b/clang/test/Driver/ftime-trace-sections.py
@@ -19,10 +19,7 @@ def is_before(range1, range2):
log_contents = json.loads(sys.stdin.read())
events = log_contents["traceEvents"]
-
-instants = [event for event in events if event["name"] == "InstantiateFunction"]
codegens = [event for event in events if event["name"] == "CodeGen Function"]
-opts = [event for event in events if event["name"] == "OptFunction"]
frontends = [event for event in events if event["name"] == "Frontend"]
backends = [event for event in events if event["name"] == "Backend"]
@@ -51,11 +48,3 @@ if not all(
]
):
sys.exit("Not all Frontend section are before all Backend sections!")
-
-# Check that entries for foo exist and are in a demangled form.
-if not any(e for e in instants if "foo<int>" in e["args"]["detail"]):
- sys.exit("Missing Instantiate entry for foo!")
-if not any(e for e in codegens if "foo<int>" in e["args"]["detail"]):
- sys.exit("Missing CodeGen entry for foo!")
-if not any(e for e in opts if "foo<int>" in e["args"]["detail"]):
- sys.exit("Missing Optimize entry for foo!")
diff --git a/clang/test/Driver/linker-wrapper-passes.c b/clang/test/Driver/linker-wrapper-passes.c
deleted file mode 100644
index fb63ef7..0000000
--- a/clang/test/Driver/linker-wrapper-passes.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// Check various clang-linker-wrapper pass options after -offload-opt.
-
-// REQUIRES: llvm-plugins, llvm-examples
-// REQUIRES: x86-registered-target
-// REQUIRES: amdgpu-registered-target
-
-// https://github.com/llvm/llvm-project/issues/100212
-// XFAIL: *
-
-// Setup.
-// RUN: mkdir -p %t
-// RUN: %clang -cc1 -emit-llvm-bc -o %t/host-x86_64-unknown-linux-gnu.bc \
-// RUN: -disable-O0-optnone -triple=x86_64-unknown-linux-gnu %s
-// RUN: %clang -cc1 -emit-llvm-bc -o %t/openmp-amdgcn-amd-amdhsa.bc \
-// RUN: -disable-O0-optnone -triple=amdgcn-amd-amdhsa %s
-// RUN: opt %t/openmp-amdgcn-amd-amdhsa.bc -o %t/openmp-amdgcn-amd-amdhsa.bc \
-// RUN: -passes=forceattrs -force-remove-attribute=f:noinline
-// RUN: clang-offload-packager -o %t/openmp-x86_64-unknown-linux-gnu.out \
-// RUN: --image=file=%t/openmp-amdgcn-amd-amdhsa.bc,arch=gfx90a,triple=amdgcn-amd-amdhsa
-// RUN: %clang -cc1 -S -o %t/host-x86_64-unknown-linux-gnu.s \
-// RUN: -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa \
-// RUN: -fembed-offload-object=%t/openmp-x86_64-unknown-linux-gnu.out \
-// RUN: %t/host-x86_64-unknown-linux-gnu.bc
-// RUN: %clang -cc1as -o %t/host-x86_64-unknown-linux-gnu.o \
-// RUN: -triple x86_64-unknown-linux-gnu -filetype obj -target-cpu x86-64 \
-// RUN: %t/host-x86_64-unknown-linux-gnu.s
-
-// Check plugin, -passes, and no remarks.
-// RUN: clang-linker-wrapper -o a.out --embed-bitcode \
-// RUN: --linker-path=/usr/bin/true %t/host-x86_64-unknown-linux-gnu.o \
-// RUN: %offload-opt-loadbye --offload-opt=-wave-goodbye \
-// RUN: --offload-opt=-passes="function(goodbye),module(inline)" 2>&1 | \
-// RUN: FileCheck -match-full-lines -check-prefixes=OUT %s
-
-// Check plugin, -p, and remarks.
-// RUN: clang-linker-wrapper -o a.out --embed-bitcode \
-// RUN: --linker-path=/usr/bin/true %t/host-x86_64-unknown-linux-gnu.o \
-// RUN: %offload-opt-loadbye --offload-opt=-wave-goodbye \
-// RUN: --offload-opt=-p="function(goodbye),module(inline)" \
-// RUN: --offload-opt=-pass-remarks=inline \
-// RUN: --offload-opt=-pass-remarks-output=%t/remarks.yml \
-// RUN: --offload-opt=-pass-remarks-filter=inline \
-// RUN: --offload-opt=-pass-remarks-format=yaml 2>&1 | \
-// RUN: FileCheck -match-full-lines -check-prefixes=OUT,REM %s
-// RUN: FileCheck -input-file=%t/remarks.yml -match-full-lines \
-// RUN: -check-prefixes=YML %s
-
-// Check handling of bad plugin.
-// RUN: not clang-linker-wrapper \
-// RUN: --offload-opt=-load-pass-plugin=%t/nonexistent.so 2>&1 | \
-// RUN: FileCheck -match-full-lines -check-prefixes=BAD-PLUGIN %s
-
-// OUT-NOT: {{.}}
-// OUT: Bye: f
-// OUT-NEXT: Bye: test
-// REM-NEXT: remark: {{.*}} 'f' inlined into 'test' {{.*}}
-// OUT-NOT: {{.}}
-
-// YML-NOT: {{.}}
-// YML: --- !Passed
-// YML-NEXT: Pass: inline
-// YML-NEXT: Name: Inlined
-// YML-NEXT: Function: test
-// YML-NEXT: Args:
-// YML: - Callee: f
-// YML: - Caller: test
-// YML: ...
-// YML-NOT: {{.}}
-
-// BAD-PLUGIN-NOT: {{.}}
-// BAD-PLUGIN: {{.*}}Could not load library {{.*}}nonexistent.so{{.*}}
-// BAD-PLUGIN-NOT: {{.}}
-
-void f() {}
-void test() { f(); }
diff --git a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a15.c b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a15.c
index 267287e..dec48bb 100644
--- a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a15.c
+++ b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a15.c
@@ -23,6 +23,7 @@
// CHECK-NEXT: FEAT_FHM Enable FP16 FML instructions
// CHECK-NEXT: FEAT_FP Enable Armv8.0-A Floating Point Extensions
// CHECK-NEXT: FEAT_FP16 Enable half-precision floating-point data processing
+// CHECK-NEXT: FEAT_FPAC Enable Armv8.3-A Pointer Authentication Faulting enhancement
// CHECK-NEXT: FEAT_FRINTTS Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int
// CHECK-NEXT: FEAT_FlagM Enable Armv8.4-A Flag Manipulation instructions
// CHECK-NEXT: FEAT_FlagM2 Enable alternative NZCV format for floating point comparisons
diff --git a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a16.c b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a16.c
index de382a3..477652d 100644
--- a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a16.c
+++ b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a16.c
@@ -23,6 +23,7 @@
// CHECK-NEXT: FEAT_FHM Enable FP16 FML instructions
// CHECK-NEXT: FEAT_FP Enable Armv8.0-A Floating Point Extensions
// CHECK-NEXT: FEAT_FP16 Enable half-precision floating-point data processing
+// CHECK-NEXT: FEAT_FPAC Enable Armv8.3-A Pointer Authentication Faulting enhancement
// CHECK-NEXT: FEAT_FRINTTS Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int
// CHECK-NEXT: FEAT_FlagM Enable Armv8.4-A Flag Manipulation instructions
// CHECK-NEXT: FEAT_FlagM2 Enable alternative NZCV format for floating point comparisons
diff --git a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a17.c b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a17.c
index 641aa3f..311cc94 100644
--- a/clang/test/Driver/print-enabled-extensions/aarch64-apple-a17.c
+++ b/clang/test/Driver/print-enabled-extensions/aarch64-apple-a17.c
@@ -23,6 +23,7 @@
// CHECK-NEXT: FEAT_FHM Enable FP16 FML instructions
// CHECK-NEXT: FEAT_FP Enable Armv8.0-A Floating Point Extensions
// CHECK-NEXT: FEAT_FP16 Enable half-precision floating-point data processing
+// CHECK-NEXT: FEAT_FPAC Enable Armv8.3-A Pointer Authentication Faulting enhancement
// CHECK-NEXT: FEAT_FRINTTS Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int
// CHECK-NEXT: FEAT_FlagM Enable Armv8.4-A Flag Manipulation instructions
// CHECK-NEXT: FEAT_FlagM2 Enable alternative NZCV format for floating point comparisons
diff --git a/clang/test/Driver/print-enabled-extensions/aarch64-apple-m4.c b/clang/test/Driver/print-enabled-extensions/aarch64-apple-m4.c
index 5096bc6..44d618a 100644
--- a/clang/test/Driver/print-enabled-extensions/aarch64-apple-m4.c
+++ b/clang/test/Driver/print-enabled-extensions/aarch64-apple-m4.c
@@ -23,6 +23,7 @@
// CHECK-NEXT: FEAT_FHM Enable FP16 FML instructions
// CHECK-NEXT: FEAT_FP Enable Armv8.0-A Floating Point Extensions
// CHECK-NEXT: FEAT_FP16 Enable half-precision floating-point data processing
+// CHECK-NEXT: FEAT_FPAC Enable Armv8.3-A Pointer Authentication Faulting enhancement
// CHECK-NEXT: FEAT_FRINTTS Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int
// CHECK-NEXT: FEAT_FlagM Enable Armv8.4-A Flag Manipulation instructions
// CHECK-NEXT: FEAT_FlagM2 Enable alternative NZCV format for floating point comparisons
diff --git a/clang/test/Driver/print-supported-extensions-aarch64.c b/clang/test/Driver/print-supported-extensions-aarch64.c
index 6b969d5..242cece 100644
--- a/clang/test/Driver/print-supported-extensions-aarch64.c
+++ b/clang/test/Driver/print-supported-extensions-aarch64.c
@@ -5,7 +5,7 @@
// CHECK-EMPTY:
// CHECK-NEXT: Name Architecture Feature(s) Description
// CHECK-NEXT: aes FEAT_AES, FEAT_PMULL Enable AES support
-// CHECK-NEXT: b16b16 FEAT_SVE_B16B16 Enable SVE2.1 or SME2.1 non-widening BFloat16 to BFloat16 instructions
+// CHECK-NEXT: b16b16 FEAT_B16B16 Enable SME2.1 ZA-targeting non-widening BFloat16 to BFloat16 instructions
// CHECK-NEXT: bf16 FEAT_BF16 Enable BFloat16 Extension
// CHECK-NEXT: brbe FEAT_BRBE Enable Branch Record Buffer Extension
// CHECK-NEXT: bti FEAT_BTI Enable Branch Target Identification
@@ -71,6 +71,7 @@
// CHECK-NEXT: ssve-fp8dot4 FEAT_SSVE_FP8DOT4 Enable SVE2 FP8 4-way dot product instructions
// CHECK-NEXT: ssve-fp8fma FEAT_SSVE_FP8FMA Enable SVE2 FP8 multiply-add instructions
// CHECK-NEXT: sve FEAT_SVE Enable Scalable Vector Extension (SVE) instructions
+// CHECK-NEXT: sve-b16b16 FEAT_SVE_B16B16 Enable SVE2.1 non-widening and SME2.1 Z-targeting non-widening BFloat16 to BFloat16 instructions
// CHECK-NEXT: sve2 FEAT_SVE2 Enable Scalable Vector Extension 2 (SVE2) instructions
// CHECK-NEXT: sve2-aes FEAT_SVE_AES, FEAT_SVE_PMULL128 Enable AES SVE2 instructions
// CHECK-NEXT: sve2-bitperm FEAT_SVE_BitPerm Enable bit permutation SVE2 instructions
diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c
index 7a885cd..750fb63 100644
--- a/clang/test/Driver/riscv-cpus.c
+++ b/clang/test/Driver/riscv-cpus.c
@@ -304,6 +304,54 @@
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zbs"
// MCPU-SIFIVE-P450-SAME: "-target-abi" "lp64d"
+// RUN: %clang -target riscv64 -### -c %s 2>&1 -mcpu=sifive-p470 | FileCheck -check-prefix=MCPU-SIFIVE-P470 %s
+// MCPU-SIFIVE-P470: "-target-cpu" "sifive-p470"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+m"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+a"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+f"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+d"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+c"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+v"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zic64b"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicbom"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicbop"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicboz"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccamoa"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccif"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicclsm"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccrse"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicsr"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zifencei"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zihintntl"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zihintpause"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zihpm"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zmmul"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+za64rs"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zfhmin"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zba"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zbb"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zbs"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvbb"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvbc"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve32f"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve32x"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve64d"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve64f"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve64x"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvkg"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvkn"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvknc"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvkned"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvkng"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvknhb"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvks"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvksc"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvksed"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvksg"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvksh"
+// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvkt"
+// MCPU-SIFIVE-P470-SAME: "-target-abi" "lp64d"
+
// RUN: %clang -target riscv64 -### -c %s 2>&1 -mcpu=sifive-p670 | FileCheck -check-prefix=MCPU-SIFIVE-P670 %s
// MCPU-SIFIVE-P670: "-target-cpu" "sifive-p670"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+m"
diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c
index b87bced..249bea2 100644
--- a/clang/test/Misc/target-invalid-cpu-note.c
+++ b/clang/test/Misc/target-invalid-cpu-note.c
@@ -85,7 +85,7 @@
// RUN: not %clang_cc1 -triple riscv64 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV64
// RISCV64: error: unknown target CPU 'not-a-cpu'
-// RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, spacemit-x60, syntacore-scr3-rv64, syntacore-scr4-rv64, veyron-v1, xiangshan-nanhu{{$}}
+// RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p470, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, spacemit-x60, syntacore-scr3-rv64, syntacore-scr4-rv64, veyron-v1, xiangshan-nanhu{{$}}
// RUN: not %clang_cc1 -triple riscv32 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV32
// TUNE-RISCV32: error: unknown target CPU 'not-a-cpu'
@@ -93,4 +93,5 @@
// RUN: not %clang_cc1 -triple riscv64 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV64
// TUNE-RISCV64: error: unknown target CPU 'not-a-cpu'
-// TUNE-RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, spacemit-x60, syntacore-scr3-rv64, syntacore-scr4-rv64, veyron-v1, xiangshan-nanhu, generic, rocket, sifive-7-series{{$}}
+// TUNE-RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p470, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, spacemit-x60, syntacore-scr3-rv64, syntacore-scr4-rv64, veyron-v1, xiangshan-nanhu, generic, rocket, sifive-7-series{{$}}
+
diff --git a/clang/test/Modules/crash-vfs-include-pch.m b/clang/test/Modules/crash-vfs-include-pch.m
index 4c05a07..9ca1002 100644
--- a/clang/test/Modules/crash-vfs-include-pch.m
+++ b/clang/test/Modules/crash-vfs-include-pch.m
@@ -10,7 +10,7 @@
// RUN: env FORCE_CLANG_DIAGNOSTICS_CRASH= TMPDIR=%t TEMP=%t TMP=%t \
// RUN: not %clang %s -E -include-pch %t/out/pch-used.h.pch -fmodules -nostdlibinc \
-// RUN: -fimplicit-module-maps -fbuiltin-headers-in-system-modules \
+// RUN: -fimplicit-module-maps -Xclang -fbuiltin-headers-in-system-modules \
// RUN: -fmodules-cache-path=%t/cache -O0 -Xclang -fno-validate-pch \
// RUN: -isystem %S/Inputs/System/usr/include -o %t/output.E 2>&1 | FileCheck %s
diff --git a/clang/test/OpenMP/target_teams_ast_print.cpp b/clang/test/OpenMP/target_teams_ast_print.cpp
index 2ff34e4..1590a99 100644
--- a/clang/test/OpenMP/target_teams_ast_print.cpp
+++ b/clang/test/OpenMP/target_teams_ast_print.cpp
@@ -115,6 +115,10 @@ int main (int argc, char **argv) {
// CHECK-NEXT: #pragma omp target teams ompx_bare num_teams(1) thread_limit(32)
a=3;
// CHECK-NEXT: a = 3;
+#pragma omp target teams ompx_bare num_teams(1, 2, 3) thread_limit(32)
+// CHECK-NEXT: #pragma omp target teams ompx_bare num_teams(1,2,3) thread_limit(32)
+ a=4;
+// CHECK-NEXT: a = 4;
#pragma omp target teams default(none), private(argc,b) num_teams(f) firstprivate(argv) reduction(| : c, d) reduction(* : e) thread_limit(f+g)
// CHECK-NEXT: #pragma omp target teams default(none) private(argc,b) num_teams(f) firstprivate(argv) reduction(|: c,d) reduction(*: e) thread_limit(f + g)
foo();
diff --git a/clang/test/OpenMP/target_teams_distribute_num_teams_messages.cpp b/clang/test/OpenMP/target_teams_distribute_num_teams_messages.cpp
index c0a31fa..b489e6a 100644
--- a/clang/test/OpenMP/target_teams_distribute_num_teams_messages.cpp
+++ b/clang/test/OpenMP/target_teams_distribute_num_teams_messages.cpp
@@ -44,6 +44,12 @@ T tmain(T argc) {
#pragma omp target teams distribute num_teams(3.14) // expected-error 2 {{expression must have integral or unscoped enumeration type, not 'double'}}
for (int i=0; i<100; i++) foo();
+#pragma omp target teams distribute num_teams(1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ for (int i=0; i<100; i++) foo();
+
+#pragma omp target teams ompx_bare num_teams(1, 2, 3, 4) thread_limit(1) // expected-error {{at most three expressions are allowed in 'num_teams' clause in 'target teams ompx_bare' construct}}
+ for (int i=0; i<100; i++) foo();
+
return 0;
}
@@ -85,5 +91,11 @@ int main(int argc, char **argv) {
#pragma omp target teams distribute num_teams (3.14) // expected-error {{expression must have integral or unscoped enumeration type, not 'double'}}
for (int i=0; i<100; i++) foo();
+#pragma omp target teams distribute num_teams(1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ for (int i=0; i<100; i++) foo();
+
+#pragma omp target teams ompx_bare num_teams(1, 2, 3, 4) thread_limit(1) // expected-error {{at most three expressions are allowed in 'num_teams' clause in 'target teams ompx_bare' construct}}
+ for (int i=0; i<100; i++) foo();
+
return tmain<int, 10>(argc); // expected-note {{in instantiation of function template specialization 'tmain<int, 10>' requested here}}
}
diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_num_teams_messages.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_num_teams_messages.cpp
index d80b6ea..fa6e8f5 100644
--- a/clang/test/OpenMP/target_teams_distribute_parallel_for_num_teams_messages.cpp
+++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_num_teams_messages.cpp
@@ -43,6 +43,8 @@ T tmain(T argc) {
for (int i=0; i<100; i++) foo();
#pragma omp target teams distribute parallel for num_teams(3.14) // expected-error 2 {{expression must have integral or unscoped enumeration type, not 'double'}}
for (int i=0; i<100; i++) foo();
+#pragma omp target teams distribute parallel for num_teams(1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ for (int i=0; i<100; i++) foo();
return 0;
}
@@ -85,5 +87,8 @@ int main(int argc, char **argv) {
#pragma omp target teams distribute parallel for num_teams (3.14) // expected-error {{expression must have integral or unscoped enumeration type, not 'double'}}
for (int i=0; i<100; i++) foo();
+#pragma omp target teams distribute parallel for num_teams(1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ for (int i=0; i<100; i++) foo();
+
return tmain<int, 10>(argc); // expected-note {{in instantiation of function template specialization 'tmain<int, 10>' requested here}}
}
diff --git a/clang/test/OpenMP/teams_num_teams_messages.cpp b/clang/test/OpenMP/teams_num_teams_messages.cpp
index 40da396..0cfecc5 100644
--- a/clang/test/OpenMP/teams_num_teams_messages.cpp
+++ b/clang/test/OpenMP/teams_num_teams_messages.cpp
@@ -57,6 +57,9 @@ T tmain(T argc) {
#pragma omp target
#pragma omp teams num_teams(3.14) // expected-error 2 {{expression must have integral or unscoped enumeration type, not 'double'}}
foo();
+#pragma omp target
+#pragma omp teams num_teams (1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ foo();
return 0;
}
@@ -111,5 +114,9 @@ int main(int argc, char **argv) {
#pragma omp teams num_teams (3.14) // expected-error {{expression must have integral or unscoped enumeration type, not 'double'}}
foo();
+#pragma omp target
+#pragma omp teams num_teams (1, 2, 3) // expected-error {{only one expression allowed in 'num_teams' clause}}
+ foo();
+
return tmain<int, 10>(argc); // expected-note {{in instantiation of function template specialization 'tmain<int, 10>' requested here}}
}
diff --git a/clang/test/Preprocessor/predefined-macros-no-warnings.c b/clang/test/Preprocessor/predefined-macros-no-warnings.c
index 722e3e7..d44b99a 100644
--- a/clang/test/Preprocessor/predefined-macros-no-warnings.c
+++ b/clang/test/Preprocessor/predefined-macros-no-warnings.c
@@ -75,8 +75,6 @@
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple m68k
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple m68k-linux
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple m68k-netbsd
-// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple le32-nacl
-// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple le64
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple ppc
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple ppc-freebsd
// RUN: %clang_cc1 %s -Eonly -Wsystem-headers -Werror -triple ppc-netbsd
diff --git a/clang/test/Preprocessor/ptrauth_feature.c b/clang/test/Preprocessor/ptrauth_feature.c
index 14059f8..2a3edc2 100644
--- a/clang/test/Preprocessor/ptrauth_feature.c
+++ b/clang/test/Preprocessor/ptrauth_feature.c
@@ -2,31 +2,34 @@
//// For example, -fptrauth-init-fini will not affect codegen without -fptrauth-calls, but the preprocessor feature would be set anyway.
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-intrinsics | \
-// RUN: FileCheck %s --check-prefixes=INTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=INTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-calls | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,CALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,CALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-returns | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,RETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,RETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-vtable-pointer-address-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,VPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,VPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-vtable-pointer-type-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,VPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,VPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-type-info-vtable-pointer-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,TYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,TYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-function-pointer-type-discrimination | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,FUNC,NOINITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,FUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-init-fini | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,INITFINI,NOGOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,INITFINI,NOINITFINI_ADDR_DISCR,NOGOTOS
+
+// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-init-fini-address-discrimination | \
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,INITFINI_ADDR_DISCR,NOGOTOS
// RUN: %clang_cc1 -E %s -triple=aarch64 -fptrauth-indirect-gotos | \
-// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,GOTOS
+// RUN: FileCheck %s --check-prefixes=NOINTRIN,NOCALLS,NORETS,NOVPTR_ADDR_DISCR,NOVPTR_TYPE_DISCR,NOTYPE_INFO_DISCR,NOFUNC,NOINITFINI,NOINITFINI_ADDR_DISCR,GOTOS
#if __has_feature(ptrauth_intrinsics)
// INTRIN: has_ptrauth_intrinsics
@@ -101,6 +104,14 @@ void has_ptrauth_init_fini() {}
void no_ptrauth_init_fini() {}
#endif
+#if __has_feature(ptrauth_init_fini_address_discrimination)
+// INITFINI_ADDR_DISCR: has_ptrauth_init_fini_address_discrimination
+void has_ptrauth_init_fini_address_discrimination() {}
+#else
+// NOINITFINI_ADDR_DISCR: no_ptrauth_init_fini_address_discrimination
+void no_ptrauth_init_fini_address_discrimination() {}
+#endif
+
#if __has_feature(ptrauth_indirect_gotos)
// GOTOS: has_ptrauth_indirect_gotos
void has_ptrauth_indirect_gotos() {}
diff --git a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_b16b16.cpp b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_b16b16.cpp
index 8a8f121..b93f348 100644
--- a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_b16b16.cpp
+++ b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_b16b16.cpp
@@ -6,8 +6,45 @@
void test_b16b16( svbfloat16_t bf16, svbfloat16x2_t bf16x2, svbfloat16x4_t bf16x4) __arm_streaming
{
- // expected-error@+1 {{'svclamp_single_bf16_x2' needs target feature sme2,b16b16}}
+ // expected-error@+1 {{'svclamp_single_bf16_x2' needs target feature sme2,sve-b16b16}}
svclamp_single_bf16_x2(bf16x2, bf16, bf16);
- // expected-error@+1 {{'svclamp_single_bf16_x4' needs target feature sme2,b16b16}}
+ // expected-error@+1 {{'svclamp_single_bf16_x4' needs target feature sme2,sve-b16b16}}
svclamp_single_bf16_x4(bf16x4, bf16, bf16);
+
+ // expected-error@+1 {{'svmax_single_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmax_single_bf16_x2(bf16x2, bf16);
+ // expected-error@+1 {{'svmax_single_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmax_single_bf16_x4(bf16x4, bf16);
+ // expected-error@+1 {{'svmax_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmax_bf16_x2(bf16x2, bf16x2);
+ // expected-error@+1 {{'svmax_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmax_bf16_x4(bf16x4, bf16x4);
+
+ // expected-error@+1 {{'svmaxnm_single_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmaxnm_single_bf16_x2(bf16x2, bf16);
+ // expected-error@+1 {{'svmaxnm_single_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmaxnm_single_bf16_x4(bf16x4, bf16);
+ // expected-error@+1 {{'svmaxnm_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmaxnm_bf16_x2(bf16x2, bf16x2);
+ // expected-error@+1 {{'svmaxnm_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmaxnm_bf16_x4(bf16x4, bf16x4);
+
+ // expected-error@+1 {{'svmin_single_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmin_single_bf16_x2(bf16x2, bf16);
+ // expected-error@+1 {{'svmin_single_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmin_single_bf16_x4(bf16x4, bf16);
+ // expected-error@+1 {{'svmin_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svmin_bf16_x2(bf16x2, bf16x2);
+ // expected-error@+1 {{'svmin_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svmin_bf16_x4(bf16x4, bf16x4);
+
+ // expected-error@+1 {{'svminnm_single_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svminnm_single_bf16_x2(bf16x2, bf16);
+ // expected-error@+1 {{'svminnm_single_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svminnm_single_bf16_x4(bf16x4, bf16);
+
+ // expected-error@+1 {{'svminnm_bf16_x2' needs target feature sme2,sve-b16b16}}
+ svminnm_bf16_x2(bf16x2, bf16x2);
+ // expected-error@+1 {{'svminnm_bf16_x4' needs target feature sme2,sve-b16b16}}
+ svminnm_bf16_x4(bf16x4, bf16x4);
} \ No newline at end of file
diff --git a/clang/test/Sema/aarch64-sve2p1-intrinsics/acle_sve2p1_b16b16.cpp b/clang/test/Sema/aarch64-sve2p1-intrinsics/acle_sve2p1_b16b16.cpp
new file mode 100644
index 0000000..188054f
--- /dev/null
+++ b/clang/test/Sema/aarch64-sve2p1-intrinsics/acle_sve2p1_b16b16.cpp
@@ -0,0 +1,56 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sve2 -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme2 -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sve.h>
+
+#if defined __ARM_FEATURE_SME
+#define MODE_ATTR __arm_streaming
+#else
+#define MODE_ATTR
+#endif
+
+__attribute__((target("sve-b16b16")))
+void test_with_sve_b16b16(svbool_t pg, svbfloat16_t op1, svbfloat16_t op2, svbfloat16_t op3) MODE_ATTR
+{
+ svclamp_bf16(op1, op2, op3);
+ svadd_bf16_m(pg, op1, op2);
+ svmax_bf16_m(pg, op1, op2);
+ svmaxnm_bf16_m(pg, op1, op2);
+ svmin_bf16_m(pg, op1, op2);
+ svminnm_bf16_m(pg, op1, op2);
+ svmla_lane_bf16(op1, op2, op3, 1);
+ svmla_bf16_m(pg, op1, op2, op3);
+ svmls_bf16_m(pg, op1, op2, op3);
+ svmul_lane_bf16(op1, op2, 1);
+ svmul_bf16_m(pg, op1, op2);
+ svsub_bf16_m(pg, op1, op2);
+}
+
+void test_no_sve_b16b16(svbool_t pg, svbfloat16_t op1, svbfloat16_t op2, svbfloat16_t op3) MODE_ATTR
+{
+ // expected-error@+1 {{'svclamp_bf16' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svclamp_bf16(op1, op2, op3);
+ // expected-error@+1 {{'svadd_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svadd_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svmax_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmax_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svmaxnm_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmaxnm_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svmin_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmin_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svminnm_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svminnm_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svmla_lane_bf16' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmla_lane_bf16(op1, op2, op3, 1);
+ // expected-error@+1 {{'svmla_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmla_bf16_m(pg, op1, op2, op3);
+ // expected-error@+1 {{'svmls_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmls_bf16_m(pg, op1, op2, op3);
+ // expected-error@+1 {{'svmul_lane_bf16' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmul_lane_bf16(op1, op2, 1);
+ // expected-error@+1 {{'svmul_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svmul_bf16_m(pg, op1, op2);
+ // expected-error@+1 {{'svsub_bf16_m' needs target feature (sve2,sve-b16b16)|(sme2,sve-b16b16)}}
+ svsub_bf16_m(pg, op1, op2);
+}
diff --git a/clang/test/SemaCXX/constexpr-vectors-access-elements.cpp b/clang/test/SemaCXX/constexpr-vectors-access-elements.cpp
new file mode 100644
index 0000000..08223e1
--- /dev/null
+++ b/clang/test/SemaCXX/constexpr-vectors-access-elements.cpp
@@ -0,0 +1,46 @@
+// RUN: %clang_cc1 %s -Wno-uninitialized -std=c++17 -fsyntax-only -verify
+
+namespace Vector {
+
+using TwoIntsVecSize __attribute__((vector_size(8))) = int;
+
+constexpr TwoIntsVecSize a = {1,2};
+static_assert(a[1] == 2);
+static_assert(a[2]); // expected-error {{not an integral constant expression}} expected-note {{read of dereferenced one-past-the-end pointer}}
+
+constexpr struct {
+ TwoIntsVecSize b;
+} Val = {{0,1}};
+
+static_assert(Val.b[1] == 1);
+
+constexpr TwoIntsVecSize c[3] = {{0,1}, {2,3}, {4,5}};
+static_assert(c[0][0] == 0);
+static_assert(c[1][1] == 3);
+static_assert(c[2][3]); // expected-error {{not an integral constant expression}} expected-note {{cannot refer to element 3 of array of 2 elements}}
+
+// make sure clang rejects taking address of a vector element
+static_assert(&a[0]); // expected-error {{address of vector element requested}}
+
+}
+
+namespace ExtVector {
+
+using FourIntsExtVec __attribute__((ext_vector_type(4))) = int;
+
+constexpr FourIntsExtVec b = {1,2,3,4};
+static_assert(b[0] == 1 && b[1] == 2 && b[2] == 3 && b[3] == 4);
+static_assert(b.s0 == 1 && b.s1 == 2 && b.s2 == 3 && b.s3 == 4);
+static_assert(b.x == 1 && b.y == 2 && b.z == 3 && b.w == 4);
+static_assert(b.r == 1 && b.g == 2 && b.b == 3 && b.a == 4);
+static_assert(b[5]); // expected-error {{not an integral constant expression}} expected-note {{cannot refer to element 5 of array of 4 elements}}
+
+// FIXME: support selecting multiple elements
+static_assert(b.lo.lo == 1); // expected-error {{not an integral constant expression}}
+// static_assert(b.lo.lo==1 && b.lo.hi==2 && b.hi.lo == 3 && b.hi.hi == 4);
+// static_assert(b.odd[0]==1 && b.odd[1]==2 && b.even[0] == 3 && b.even[1] == 4);
+
+// make sure clang rejects taking address of a vector element
+static_assert(&b[1]); // expected-error {{address of vector element requested}}
+
+}
diff --git a/clang/test/SemaCXX/decltype.cpp b/clang/test/SemaCXX/decltype.cpp
index 9961c5e..76d6a041 100644
--- a/clang/test/SemaCXX/decltype.cpp
+++ b/clang/test/SemaCXX/decltype.cpp
@@ -147,6 +147,31 @@ namespace GH97646 {
}
}
+namespace GH99873 {
+struct B {
+ int x;
+};
+
+template<typename T>
+struct A {
+ template<typename U>
+ constexpr int f() const {
+ return 1;
+ }
+
+ template<>
+ constexpr int f<int>() const {
+ return decltype(B::x)();
+ }
+};
+
+// This shouldn't crash.
+static_assert(A<int>().f<int>() == 0, "");
+// The result should not be dependent.
+static_assert(A<int>().f<int>() != 0, ""); // expected-error {{static assertion failed due to requirement 'GH99873::A<int>().f<int>() != 0'}}
+ // expected-note@-1 {{expression evaluates to '0 != 0'}}
+}
+
template<typename>
class conditional {
};
diff --git a/clang/test/SemaCXX/fold_lambda_with_variadics.cpp b/clang/test/SemaCXX/fold_lambda_with_variadics.cpp
new file mode 100644
index 0000000..14e242f
--- /dev/null
+++ b/clang/test/SemaCXX/fold_lambda_with_variadics.cpp
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -fsyntax-only -std=c++20 -verify %s
+
+namespace GH85667 {
+
+template <class T>
+struct identity {
+ using type = T;
+};
+
+template <class = void> void f() {
+
+ static_assert([]<class... Is>(Is... x) {
+ return ([I(x)] {
+ return I;
+ }() + ...);
+ }(1, 2) == 3);
+
+ []<class... Is>(Is... x) {
+ return ([](auto y = Is()) { return y + 1; }() + ...); // expected-error {{no matching function}} \
+ // expected-note {{couldn't infer template argument 'y:auto'}} \
+ // expected-note@-1 {{requested here}}
+ // expected-note@#instantiate-f {{requested here}}
+ }(1);
+
+ []<class... Is>() {
+ ([]<class = Is>(Is)
+ noexcept(bool(Is()))
+ {}(Is()),
+ ...);
+ }.template operator()<char, int, float>();
+
+ static_assert(__is_same(decltype([]<class... Is>() {
+ return ([]() -> decltype(Is()) { return {}; }(),
+ ...);
+ }.template operator()<int, char>()),
+ char));
+
+ []<class... Is>() {
+ return ([]<class... Ts>() -> decltype(Is()) { return Ts(); }() + ...);
+ // expected-error@-1 {{unexpanded parameter pack 'Ts'}}
+ }.template operator()<int, int>();
+
+ // https://github.com/llvm/llvm-project/issues/56852
+ []<class... Is>(Is...) {
+ ([] {
+ using T = identity<Is>::type;
+ }(), ...);
+ }(1, 2);
+
+ [](auto ...y) {
+ ([y] { }(), ...);
+ }();
+
+ [](auto ...x) {
+ ([&](auto ...y) {
+ ([x..., y] { }(), ...);
+ })(1);
+ }(2, 'b');
+
+#if 0
+ // FIXME: https://github.com/llvm/llvm-project/issues/18873
+ [](auto ...x) { // #1
+ ([&](auto ...y) { // #2
+ ([x, y] { }(), ...); // #3
+ })(1, 'a'); // #4
+ }(2, 'b'); // #5
+
+ // We run into another crash for the above lambda because of the absence of a
+ // mechanism that rebuilds an unexpanded pack from an expanded Decls.
+ //
+ // Basically, this happens after `x` at #1 being expanded when the template
+ // arguments at #5, deduced as <int, char>, are ready. When we want to
+ // instantiate the body of #1, we first instantiate the CallExpr at #4, which
+ // boils down to the lambda's instantiation at #2. To that end, we have to
+ // instantiate the body of it, which turns out to be #3. #3 is a CXXFoldExpr,
+ // and we immediately have to hold off on the expansion because we don't have
+ // corresponding template arguments (arguments at #4 are not transformed yet) for it.
+ // Therefore, we want to rebuild a CXXFoldExpr, which requires another pattern
+ // transformation of the lambda inside #3. Then we need to find an unexpanded form
+ // of such a Decl of x at the time of transforming the capture, which is impossible
+ // because the instantiated form has been expanded at #1!
+
+ [](auto ...x) { // #outer
+ ([&](auto ...y) { // #inner
+ ([x, y] { }(), ...);
+ // expected-error@-1 {{parameter pack 'y' that has a different length (4 vs. 3) from outer parameter packs}}
+ // expected-note-re@#inner {{function template specialization {{.*}} requested here}}
+ // expected-note-re@#outer {{function template specialization {{.*}} requested here}}
+ // expected-note-re@#instantiate-f {{function template specialization {{.*}} requested here}}
+ })('a', 'b', 'c');
+ }(0, 1, 2, 3);
+#endif
+}
+
+template void f(); // #instantiate-f
+
+} // namespace GH85667
+
+namespace GH99877 {
+
+struct tuple {
+ int x[3];
+};
+
+template <class F> int apply(F f, tuple v) { return f(v.x[0], v.x[1], v.x[2]); }
+
+int Cartesian1(auto x, auto y) {
+ return apply(
+ [&](auto... xs) {
+ return (apply([xs](auto... ys) { return (ys + ...); }, y) + ...);
+ },
+ x);
+}
+
+int Cartesian2(auto x, auto y) {
+ return apply(
+ [&](auto... xs) {
+ return (apply([zs = xs](auto... ys) { return (ys + ...); }, y) + ...);
+ },
+ x);
+}
+
+template <int...> struct Ints {};
+template <int> struct Choose {
+ template <class> struct Templ;
+};
+template <int... x> int Cartesian3(auto y) {
+ return [&]<int... xs>(Ints<xs...>) {
+ // check in default template arguments for
+ // - type template parameters,
+ (void)(apply([]<class = decltype(xs)>(auto... ys) { return (ys + ...); },
+ y) +
+ ...);
+ // - template template parameters.
+ (void)(apply([]<template <class> class = Choose<xs>::template Templ>(
+ auto... ys) { return (ys + ...); },
+ y) +
+ ...);
+ // - non-type template parameters,
+ return (apply([]<int = xs>(auto... ys) { return (ys + ...); }, y) + ...);
+ }(Ints<x...>());
+}
+
+template <int... x> int Cartesian4(auto y) {
+ return [&]<int... xs>(Ints<xs...>) {
+ return (
+ apply([]<decltype(xs) xx = 1>(auto... ys) { return (ys + ...); }, y) +
+ ...);
+ }(Ints<x...>());
+}
+
+// FIXME: Attributes should preserve the ContainsUnexpandedPack flag.
+#if 0
+
+int Cartesian5(auto x, auto y) {
+ return apply(
+ [&](auto... xs) {
+ return (apply([](auto... ys) __attribute__((
+ diagnose_if(!__is_same(decltype(xs), int), "message",
+ "error"))) { return (ys + ...); },
+ y) +
+ ...);
+ },
+ x);
+}
+
+#endif
+
+void foo() {
+ auto x = tuple({1, 2, 3});
+ auto y = tuple({4, 5, 6});
+ Cartesian1(x, y);
+ Cartesian2(x, y);
+ Cartesian3<1, 2, 3>(y);
+ Cartesian4<1, 2, 3>(y);
+#if 0
+ Cartesian5(x, y);
+#endif
+}
+
+} // namespace GH99877
diff --git a/clang/test/SemaHLSL/BuiltIns/length-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/length-errors.hlsl
index fe0046a..281faad 100644
--- a/clang/test/SemaHLSL/BuiltIns/length-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/length-errors.hlsl
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -verify-ignore-unexpected
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -disable-llvm-passes -verify -verify-ignore-unexpected
+
void test_too_few_arg()
{
diff --git a/clang/test/SemaTemplate/address_space-dependent.cpp b/clang/test/SemaTemplate/address_space-dependent.cpp
index c8cc67e..2ca9b80 100644
--- a/clang/test/SemaTemplate/address_space-dependent.cpp
+++ b/clang/test/SemaTemplate/address_space-dependent.cpp
@@ -117,3 +117,16 @@ int main() {
return 0;
}
+
+namespace gh101685 {
+template <int AS>
+using ASPtrTy = void [[clang::address_space(AS)]] *;
+
+template <int AS>
+struct EntryTy {
+ ASPtrTy<AS> Base;
+};
+
+ASPtrTy<1> x;
+EntryTy<2> y;
+}
diff --git a/clang/test/TableGen/attrs-parser-string-switches.td b/clang/test/TableGen/attrs-parser-string-switches.td
index c15ab10..3219916 100644
--- a/clang/test/TableGen/attrs-parser-string-switches.td
+++ b/clang/test/TableGen/attrs-parser-string-switches.td
@@ -22,19 +22,15 @@ def TestUnEvalTwo : InheritableAttr {
}
// CHECK: #if defined(CLANG_ATTR_ARG_CONTEXT_LIST)
-// CHECK-NOT: .Case("Pragma::test_uneval", true)
-// CHECK: .Case("GNU::test_uneval", true)
-// CHECK-NOT: .Case("Pragma::test_uneval", true)
-// CHECK: .Case("CXX11::clang::test_uneval", true)
-// CHECK-NOT: .Case("Pragma::test_uneval", true)
-// CHECK: .Case("C23::clang::test_uneval", true)
-// CHECK-NOT: .Case("Pragma::test_uneval", true)
+// CHECK: .Case("test_uneval", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName) ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_CXX11 && ScopeName && ScopeName->getName()=="clang") ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_C23 && ScopeName && ScopeName->getName()=="clang") ? 1 : 0)
// CHECK: #endif // CLANG_ATTR_ARG_CONTEXT_LIST
// Test attributeHasIdentifierArg: Same spelling, one with and one without
// an IdentifierArg.
def TestIdentOne : Attr {
- let Spellings = [Clang<"test_ident">];
+ let Spellings = [GNU<"test_ident">];
let Args = [EnumArgument<"Option", "OptionType", /*is_string=*/false,
["optA", "optB"], ["OPTA", "OPTB"]>];
let Subjects = SubjectList<[Function]>;
@@ -48,28 +44,35 @@ def TestIdentTwo : StmtAttr {
let Documentation = [Undocumented];
}
+// Checks that the simple value is produced if only one attribute with a
+// spelling.
+def TestOnlyIdent : Attr {
+ let Spellings = [GNU<"test_only_ident">];
+ let Args = [EnumArgument<"Option", "OptionType", /*is_string=*/false,
+ ["optA", "optB"], ["OPTA", "OPTB"]>];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [Undocumented];
+}
+
// CHECK: #if defined(CLANG_ATTR_IDENTIFIER_ARG_LIST)
-// CHECK-NOT: .Case("Pragma::test_ident", true)
-// CHECK: .Case("GNU::test_ident", true)
-// CHECK-NOT: .Case("Pragma::test_ident", true)
-// CHECK: .Case("CXX11::clang::test_ident", true)
-// CHECK-NOT: .Case("Pragma::test_ident", true)
-// CHECK: .Case("C23::clang::test_ident", true)
-// CHECK-NOT: .Case("Pragma::test_ident", true)
+// CHECK: .Case("test_ident", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName)
+// CHECK: .Case("test_only_ident", true)
+// CHECK: .Case("test_targspec",
+// CHECK-SAME: (T.getArch() == llvm::Triple::arm)) ? 1 : 0
// CHECK: #endif // CLANG_ATTR_IDENTIFIER_ARG_LIST
// Test attributeStringLiteralListArg : Same spelling, some with a
// StringArgument, some without, some in different locations.
def TestStringOne : DeclOrTypeAttr {
let Spellings = [Clang<"test_string">];
- let Args = [StringArgument<"strarg">];
+ let Args = [UnsignedArgument<"unsarg">];
let Subjects = SubjectList<[Function, TypedefName, ParmVar]>;
let Documentation = [AcquireHandleDocs];
}
def TestStringTwo : InheritableAttr {
let Spellings = [Pragma<"", "test_string">];
- let Args = [UnsignedArgument<"unsarg">];
+ let Args = [StringArgument<"strarg">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
}
@@ -83,39 +86,31 @@ def TestStringThree : Attr {
}
// CHECK: #if defined(CLANG_ATTR_STRING_LITERAL_ARG_LIST)
-// CHECK-NOT: .Case("Pragma::test_string"
-// CHECK: .Case("GNU::test_string", 1)
-// CHECK: .Case("CXX11::clang::test_string", 1)
-// CHECK: .Case("C23::clang::test_string", 1)
-// CHECK-NOT: .Case("Pragma::test_string"
-// CHECK: .Case("Declspec::test_string", 2)
-// CHECK-NOT: .Case("Pragma::test_string"
+// CHECK: .Case("test_string", (Syntax==AttributeCommonInfo::AS_Declspec && !ScopeName) ? 2 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_Pragma && !ScopeName) ? 1 : 0)
+// CHECK: .Case("test_targspec",
+// CHECK-SAME: (T.getArch() == llvm::Triple::arm)) ? 4294967294 :
+// CHECK-SAME: (T.getArch() == llvm::Triple::ppc)) ? 1 :
// CHECK: #endif // CLANG_ATTR_STRING_LITERAL_ARG_LIST
// Test attributeHasVariadicIdentifierArg : One with VariadicIdentifierArgument
// and one without.
def TestVariadicIdentOne : InheritableAttr {
let Spellings = [Clang<"test_var_ident">];
- let Args = [VariadicIdentifierArgument<"iargs">];
+ let Args = [UnsignedArgument<"Hint">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
}
def TestVariadicIdentTwo : InheritableAttr {
let Spellings = [Pragma<"", "test_var_ident">];
- let Args = [UnsignedArgument<"Hint">];
+ let Args = [VariadicIdentifierArgument<"iargs">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
}
// CHECK: #if defined(CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST)
-// CHECK-NOT: .Case("Pragma::"test_var_ident", true)
-// CHECK: .Case("GNU::test_var_ident", true)
-// CHECK-NOT: .Case("Pragma::test_var_ident", true)
-// CHECK: .Case("CXX11::clang::test_var_ident", true)
-// CHECK-NOT: .Case("Pragma::test_var_ident", true)
-// CHECK: .Case("C23::clang::test_var_ident", true)
-// CHECK-NOT: .Case("Pragma::test_var_ident", true)
+// CHECK: .Case("test_var_ident", (Syntax==AttributeCommonInfo::AS_Pragma && !ScopeName))
// CHECK: #endif // CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
// Test attributeTreatsKeywordThisAsIdentifier : Same spelling, one with and
@@ -135,13 +130,9 @@ def TestVarOrIdxTwo : InheritableAttr {
}
// CHECK: #if defined(CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST)
-// CHECK-NOT: .Case("Pragma::test_var_idx", true)
-// CHECK: .Case("GNU::test_var_idx", true)
-// CHECK-NOT: .Case("Pragma::test_var_idx", true)
-// CHECK: .Case("CXX11::clang::test_var_idx", true)
-// CHECK-NOT: .Case("Pragma::test_var_idx", true)
-// CHECK: .Case("C23::clang::test_var_idx", true)
-// CHECK-NOT: .Case("Pragma::test_var_idx", true)
+// CHECK: .Case("test_var_idx", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName) ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_CXX11 && ScopeName && ScopeName->getName()=="clang") ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_C23 && ScopeName && ScopeName->getName()=="clang") ? 1 : 0)
// CHECK: #endif // CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
// Test attributeAcceptsExprPack : One with, one without.
@@ -161,13 +152,9 @@ def TestExprPackTwo : InheritableAttr {
}
// CHECK: #if defined(CLANG_ATTR_ACCEPTS_EXPR_PACK)
-// CHECK-NOT: .Case("Pragma::test_expr_pack", true)
-// CHECK: .Case("GNU::test_expr_pack", true)
-// CHECK-NOT: .Case("Pragma::test_expr_pack", true)
-// CHECK: .Case("CXX11::clang::test_expr_pack", true)
-// CHECK-NOT: .Case("Pragma::test_expr_pack", true)
-// CHECK: .Case("C23::clang::test_expr_pack", true)
-// CHECK-NOT: .Case("Pragma::test_expr_pack", true)
+// CHECK: .Case("test_expr_pack", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName) ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_CXX11 && ScopeName && ScopeName->getName()=="clang") ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_C23 && ScopeName && ScopeName->getName()=="clang") ? 1 : 0)
// CHECK: #endif // CLANG_ATTR_ACCEPTS_EXPR_PACK
@@ -188,17 +175,12 @@ def TestTypeTwo : InheritableAttr {
}
// CHECK: #if defined(CLANG_ATTR_TYPE_ARG_LIST)
-// CHECK-NOT: .Case("Pragma::test_type", true)
-// CHECK: .Case("GNU::test_type", true)
-// CHECK-NOT: .Case("Pragma::test_type", true)
-// CHECK: .Case("CXX11::clang::test_type", true)
-// CHECK-NOT: .Case("Pragma::test_type", true)
-// CHECK: .Case("C23::clang::test_type", true)
-// CHECK-NOT: .Case("Pragma::test_type", true)
+// CHECK: .Case("test_type", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName) ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_CXX11 && ScopeName && ScopeName->getName()=="clang") ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_C23 && ScopeName && ScopeName->getName()=="clang") ? 1 : 0)
// CHECK: #endif // CLANG_ATTR_TYPE_ARG_LIST
-// Test attributeHasStrictIdentifierArgs and
-// attributeHasStrictIdentifierArgAtIndex, one used StrictEnumParameters, the
+// Test attributeHasStrictIdentifierArgs, one used StrictEnumParameters, the
// other does not.
def TestStrictEnumOne : InheritableAttr {
let Spellings = [Clang<"strict_enum">];
@@ -221,12 +203,32 @@ def TestStrictEnumTwo : InheritableAttr {
let Documentation = [Undocumented];
}
-// CHECK: #if defined(CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST)
-// CHECK-NOT: .Case("Pragma::strict_enum", 5ull)
-// CHECK: .Case("GNU::strict_enum", 5ull)
-// CHECK-NOT: .Case("Pragma::strict_enum", 5ull)
-// CHECK: .Case("CXX11::clang::strict_enum", 5ull)
-// CHECK-NOT: .Case("Pragma::strict_enum", 5ull)
-// CHECK: .Case("C23::clang::strict_enum", 5ull)
-// CHECK-NOT: .Case("Pragma::strict_enum", 5ull)
-// CHECK: #endif // CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+// CHECK: #if defined(CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST)
+// CHECK: .Case("strict_enum", (Syntax==AttributeCommonInfo::AS_GNU && !ScopeName) ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_CXX11 && ScopeName && ScopeName->getName()=="clang") ? 1 :
+// CHECK-SAME: (Syntax==AttributeCommonInfo::AS_C23 && ScopeName && ScopeName->getName()=="clang") ? 1 : 0)
+// CHECK: #endif // CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST
+
+// Test that TargetSpecific attributes work as expected.
+
+def TargSpecX86 : InheritableAttr, TargetSpecificAttr<TargetArch<["x86"]>> {
+ let Spellings = [GCC<"test_targspec">];
+ let Subjects = SubjectList<[Function]>;
+ let Args = [UnsignedArgument<"ua">, DefaultIntArgument<"o", 0>];
+ let ParseKind = "TargSpec";
+ let Documentation = [Undocumented];
+}
+def TargSpecPPC : InheritableAttr, TargetSpecificAttr<TargetArch<["ppc"]>> {
+ let Spellings = [GCC<"test_targspec">];
+ let Subjects = SubjectList<[Function]>;
+ let Args = [StringArgument<"str">, VariadicExprArgument<"args">];
+ let ParseKind = "TargSpec";
+ let Documentation = [Undocumented];
+}
+def TargSpecARM : InheritableAttr, TargetSpecificAttr<TargetArch<["arm"]>> {
+ let Spellings = [GCC<"test_targspec">];
+ let Subjects = SubjectList<[Function]>;
+ let Args = [IdentifierArgument<"id">, VariadicStringArgument<"sargs">];
+ let ParseKind = "TargSpec";
+ let Documentation = [Undocumented];
+}
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 2bd7501..92a3361 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -110,15 +110,6 @@ tools = [
if config.clang_examples:
config.available_features.add("examples")
-if config.llvm_examples:
- config.available_features.add("llvm-examples")
-
-if config.llvm_linked_bye_extension:
- config.substitutions.append(("%offload-opt-loadbye", ""))
-else:
- loadbye = f"-load-pass-plugin={config.llvm_shlib_dir}/Bye{config.llvm_shlib_ext}"
- config.substitutions.append(("%offload-opt-loadbye", f"--offload-opt={loadbye}"))
-
def have_host_jit_feature_support(feature_name):
clang_repl_exe = lit.util.which("clang-repl", config.clang_tools_dir)
@@ -223,9 +214,6 @@ config.substitutions.append(("%host_cxx", config.host_cxx))
if config.has_plugins and config.llvm_plugin_ext:
config.available_features.add("plugins")
-if config.llvm_has_plugins and config.llvm_plugin_ext:
- config.available_features.add("llvm-plugins")
-
if config.clang_default_pie_on_linux:
config.available_features.add("default-pie-on-linux")
diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in
index 2cc70e5..1cbd876 100644
--- a/clang/test/lit.site.cfg.py.in
+++ b/clang/test/lit.site.cfg.py.in
@@ -7,7 +7,6 @@ config.llvm_obj_root = path(r"@LLVM_BINARY_DIR@")
config.llvm_tools_dir = lit_config.substitute(path(r"@LLVM_TOOLS_DIR@"))
config.llvm_libs_dir = lit_config.substitute(path(r"@LLVM_LIBS_DIR@"))
config.llvm_shlib_dir = lit_config.substitute(path(r"@SHLIBDIR@"))
-config.llvm_shlib_ext = "@SHLIBEXT@"
config.llvm_plugin_ext = "@LLVM_PLUGIN_EXT@"
config.lit_tools_dir = path(r"@LLVM_LIT_TOOLS_DIR@")
config.errc_messages = "@LLVM_LIT_ERRC_MESSAGES@"
@@ -40,10 +39,7 @@ config.python_executable = "@Python3_EXECUTABLE@"
config.use_z3_solver = lit_config.params.get('USE_Z3_SOLVER', "@USE_Z3_SOLVER@")
config.has_plugins = @CLANG_PLUGIN_SUPPORT@
config.clang_vendor_uti = "@CLANG_VENDOR_UTI@"
-config.llvm_examples = @LLVM_BUILD_EXAMPLES@
-config.llvm_linked_bye_extension = @LLVM_BYE_LINK_INTO_TOOLS@
config.llvm_external_lit = path(r"@LLVM_EXTERNAL_LIT@")
-config.llvm_has_plugins = @LLVM_ENABLE_PLUGINS@
config.standalone_build = @CLANG_BUILT_STANDALONE@
config.ppc_linux_default_ieeelongdouble = @PPC_LINUX_DEFAULT_IEEELONGDOUBLE@
config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@
diff --git a/clang/tools/clang-linker-wrapper/CMakeLists.txt b/clang/tools/clang-linker-wrapper/CMakeLists.txt
index bf37d80..4a16c3c 100644
--- a/clang/tools/clang-linker-wrapper/CMakeLists.txt
+++ b/clang/tools/clang-linker-wrapper/CMakeLists.txt
@@ -31,6 +31,7 @@ add_clang_tool(clang-linker-wrapper
DEPENDS
${tablegen_deps}
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
set(CLANG_LINKER_WRAPPER_LIB_DEPS
@@ -41,5 +42,3 @@ target_link_libraries(clang-linker-wrapper
PRIVATE
${CLANG_LINKER_WRAPPER_LIB_DEPS}
)
-
-export_executable_symbols_for_plugins(clang-linker-wrapper)
diff --git a/clang/tools/clang-repl/CMakeLists.txt b/clang/tools/clang-repl/CMakeLists.txt
index a35ff13..52b740b 100644
--- a/clang/tools/clang-repl/CMakeLists.txt
+++ b/clang/tools/clang-repl/CMakeLists.txt
@@ -9,6 +9,8 @@ set( LLVM_LINK_COMPONENTS
add_clang_tool(clang-repl
ClangRepl.cpp
+
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
if(MSVC)
@@ -61,8 +63,6 @@ clang_target_link_libraries(clang-repl PRIVATE
clangInterpreter
)
-export_executable_symbols_for_plugins(clang-repl)
-
# The clang-repl binary can get huge with static linking in debug mode.
# Some 32-bit targets use PLT slots with limited branch range by default and we
# start to exceed this limit, e.g. when linking for arm-linux-gnueabihf with
diff --git a/clang/tools/driver/CMakeLists.txt b/clang/tools/driver/CMakeLists.txt
index 018605c2..805dffb 100644
--- a/clang/tools/driver/CMakeLists.txt
+++ b/clang/tools/driver/CMakeLists.txt
@@ -21,6 +21,7 @@ set( LLVM_LINK_COMPONENTS
# Support plugins.
if(CLANG_PLUGIN_SUPPORT)
set(support_plugins SUPPORT_PLUGINS)
+ set(export_symbols EXPORT_SYMBOLS_FOR_PLUGINS)
endif()
add_clang_tool(clang
@@ -35,6 +36,7 @@ add_clang_tool(clang
ARMTargetParserTableGen
AArch64TargetParserTableGen
${support_plugins}
+ ${export_symbols}
GENERATE_DRIVER
)
@@ -54,11 +56,6 @@ else()
set_target_properties(clang PROPERTIES VERSION ${CLANG_EXECUTABLE_VERSION})
endif()
-# Support plugins.
-if(CLANG_PLUGIN_SUPPORT)
- export_executable_symbols_for_plugins(clang)
-endif()
-
add_dependencies(clang clang-resource-headers)
if(NOT CLANG_LINKS_TO_CREATE)
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 52d0bd9..48b34e0 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -2516,8 +2516,8 @@ void OMPClauseEnqueue::VisitOMPDeviceClause(const OMPDeviceClause *C) {
}
void OMPClauseEnqueue::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
+ VisitOMPClauseList(C);
VisitOMPClauseWithPreInit(C);
- Visitor->AddStmt(C->getNumTeams());
}
void OMPClauseEnqueue::VisitOMPThreadLimitClause(
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index 57242ff..cc87e83 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -6738,6 +6738,23 @@ TEST_P(ASTImporterOptionSpecificTestBase,
EXPECT_TRUE(ToLambda);
}
+TEST_P(ASTImporterOptionSpecificTestBase,
+ ReturnTypeDeclaredInsideOfCXX11LambdaWithTrailingReturn) {
+ Decl *From, *To;
+ std::tie(From, To) = getImportedDecl(
+ R"(
+ void foo() {
+ (void) [] {
+ struct X {};
+ return X();
+ };
+ }
+ )",
+ Lang_CXX11, "", Lang_CXX11, "foo"); // c++11 only
+ auto *ToLambda = FirstDeclMatcher<LambdaExpr>().match(To, lambdaExpr());
+ EXPECT_TRUE(ToLambda);
+}
+
TEST_P(ASTImporterOptionSpecificTestBase, LambdaInFunctionParam) {
Decl *FromTU = getTuDecl(
R"(
@@ -7581,6 +7598,25 @@ TEST_P(ImportAutoFunctions, ReturnWithSubstNonTypeTemplateParmExpr) {
EXPECT_TRUE(ToBar);
}
+TEST_P(ImportAutoFunctions, ReturnWithUnaryTransformType) {
+ const char *Code =
+ R"(
+ enum E { E1 };
+
+ template<typename T>
+ auto foo(T v) { return static_cast<__underlying_type(T)>(v); }
+
+ bool bar() { return foo(E1); }
+ )";
+ Decl *FromTU = getTuDecl(Code, Lang_CXX17);
+
+ auto *FromBar = FirstDeclMatcher<FunctionDecl>().match(
+ FromTU, functionDecl(hasName("bar")));
+
+ auto *ToBar = Import(FromBar, Lang_CXX17);
+ EXPECT_TRUE(ToBar);
+}
+
struct ImportSourceLocations : ASTImporterOptionSpecificTestBase {};
TEST_P(ImportSourceLocations, PreserveFileIDTreeStructure) {
diff --git a/clang/unittests/Interpreter/CMakeLists.txt b/clang/unittests/Interpreter/CMakeLists.txt
index c0fd2d8..ec6f81e 100644
--- a/clang/unittests/Interpreter/CMakeLists.txt
+++ b/clang/unittests/Interpreter/CMakeLists.txt
@@ -13,6 +13,8 @@ add_clang_unittest(ClangReplInterpreterTests
InterpreterTest.cpp
InterpreterExtensionsTest.cpp
CodeCompletionTest.cpp
+
+ EXPORT_SYMBOLS
)
target_link_libraries(ClangReplInterpreterTests PUBLIC
clangAST
@@ -28,8 +30,6 @@ if(NOT WIN32)
add_subdirectory(ExceptionTests)
endif()
-export_executable_symbols(ClangReplInterpreterTests)
-
if(MSVC)
set_target_properties(ClangReplInterpreterTests PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS 1)
diff --git a/clang/unittests/Interpreter/ExceptionTests/CMakeLists.txt b/clang/unittests/Interpreter/ExceptionTests/CMakeLists.txt
index 5a6597d..24ae9cd 100644
--- a/clang/unittests/Interpreter/ExceptionTests/CMakeLists.txt
+++ b/clang/unittests/Interpreter/ExceptionTests/CMakeLists.txt
@@ -12,6 +12,8 @@ set(LLVM_LINK_COMPONENTS
add_clang_unittest(ClangReplInterpreterExceptionTests
InterpreterExceptionTest.cpp
+
+ EXPORT_SYMBOLS
)
llvm_update_compile_flags(ClangReplInterpreterExceptionTests)
@@ -22,5 +24,3 @@ target_link_libraries(ClangReplInterpreterExceptionTests PUBLIC
clangFrontend
)
add_dependencies(ClangReplInterpreterExceptionTests clang-resource-headers)
-
-export_executable_symbols(ClangReplInterpreterExceptionTests)
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
index f504b1d..adbe6af 100644
--- a/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -76,8 +76,27 @@ public:
const Record &getSpellingRecord() const { return OriginalSpelling; }
};
+struct FlattenedSpellingInfo {
+ FlattenedSpellingInfo(std::string Syntax, std::string Scope,
+ std::string TargetTest, uint32_t ArgMask)
+ : Syntax(Syntax), Scope(Scope), TargetTest(TargetTest), ArgMask(ArgMask) {
+ }
+ std::string Syntax;
+ std::string Scope;
+ std::string TargetTest;
+ uint32_t ArgMask;
+};
+using FSIVecTy = std::vector<FlattenedSpellingInfo>;
+
} // end anonymous namespace
+static bool GenerateTargetSpecificAttrChecks(const Record *R,
+ std::vector<StringRef> &Arches,
+ std::string &Test,
+ std::string *FnName);
+static bool isStringLiteralArgument(const Record *Arg);
+static bool isVariadicStringLiteralArgument(const Record *Arg);
+
static std::vector<FlattenedSpelling>
GetFlattenedSpellings(const Record &Attr) {
std::vector<Record *> Spellings = Attr.getValueAsListOfDefs("Spellings");
@@ -2379,6 +2398,112 @@ template <typename Fn> static void forEachSpelling(const Record &Attr, Fn &&F) {
}
}
+std::map<std::string, std::vector<const Record *>> NameToAttrsMap;
+
+/// Build a map from the attribute name to the Attrs that use that name. If more
+/// than one Attr use a name, the arguments could be different so a more complex
+/// check is needed in the generated switch.
+void generateNameToAttrsMap(RecordKeeper &Records) {
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ for (const auto *A : Attrs) {
+ std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*A);
+ for (const auto &S : Spellings) {
+ auto It = NameToAttrsMap.find(S.name());
+ if (It != NameToAttrsMap.end()) {
+ if (llvm::none_of(It->second, [&](const Record *R) { return R == A; }))
+ It->second.emplace_back(A);
+ } else {
+ std::vector<const Record *> V;
+ V.emplace_back(A);
+ NameToAttrsMap.insert(std::make_pair(S.name(), V));
+ }
+ }
+ }
+}
+
+/// Generate the info needed to produce the case values in case more than one
+/// attribute has the same name. Store the info in a map that can be processed
+/// after all attributes are seen.
+static void generateFlattenedSpellingInfo(const Record &Attr,
+ std::map<std::string, FSIVecTy> &Map,
+ uint32_t ArgMask = 0) {
+ std::string TargetTest;
+ if (Attr.isSubClassOf("TargetSpecificAttr") &&
+ !Attr.isValueUnset("ParseKind")) {
+ const Record *T = Attr.getValueAsDef("Target");
+ std::vector<StringRef> Arches = T->getValueAsListOfStrings("Arches");
+ (void)GenerateTargetSpecificAttrChecks(T, Arches, TargetTest, nullptr);
+ }
+
+ forEachSpelling(Attr, [&](const FlattenedSpelling &S) {
+ auto It = Map.find(S.name());
+ if (It != Map.end()) {
+ It->second.emplace_back(S.variety(), S.nameSpace(), TargetTest, ArgMask);
+ } else {
+ FSIVecTy V;
+ V.emplace_back(S.variety(), S.nameSpace(), TargetTest, ArgMask);
+ Map.insert(std::make_pair(S.name(), V));
+ }
+ });
+}
+
+static bool nameAppliesToOneAttribute(std::string Name) {
+ auto It = NameToAttrsMap.find(Name);
+ assert(It != NameToAttrsMap.end());
+ return It->second.size() == 1;
+}
+
+static bool emitIfSimpleValue(std::string Name, uint32_t ArgMask,
+ raw_ostream &OS) {
+ if (nameAppliesToOneAttribute(Name)) {
+ OS << ".Case(\"" << Name << "\", ";
+ if (ArgMask != 0)
+ OS << ArgMask << ")\n";
+ else
+ OS << "true)\n";
+ return true;
+ }
+ return false;
+}
+
+static void emitSingleCondition(const FlattenedSpellingInfo &FSI,
+ raw_ostream &OS) {
+ OS << "(Syntax==AttributeCommonInfo::AS_" << FSI.Syntax << " && ";
+ if (!FSI.Scope.empty())
+ OS << "ScopeName && ScopeName->getName()==\"" << FSI.Scope << "\"";
+ else
+ OS << "!ScopeName";
+ if (!FSI.TargetTest.empty())
+ OS << " && " << FSI.TargetTest;
+ OS << ")";
+}
+
+static void emitStringSwitchCases(std::map<std::string, FSIVecTy> &Map,
+ raw_ostream &OS) {
+ for (const auto &P : Map) {
+ if (emitIfSimpleValue(P.first, P.second[0].ArgMask, OS))
+ continue;
+
+ // Not simple, build expressions for each case.
+ StringRef Name = P.first;
+ const FSIVecTy &Vec = P.second;
+ OS << ".Case(\"" << Name << "\", ";
+ for (unsigned I = 0, E = Vec.size(); I < E; ++I) {
+ emitSingleCondition(Vec[I], OS);
+ uint32_t ArgMask = Vec[I].ArgMask;
+ if (E == 1 && ArgMask == 0)
+ continue;
+
+ // More than one or it's the Mask form. Create a conditional expression.
+ uint32_t SuccessValue = ArgMask != 0 ? ArgMask : 1;
+ OS << " ? " << SuccessValue << " : ";
+ if (I == E - 1)
+ OS << 0;
+ }
+ OS << ")\n";
+ }
+}
+
static bool isTypeArgument(const Record *Arg) {
return !Arg->getSuperClasses().empty() &&
Arg->getSuperClasses().back().first->getName() == "TypeArgument";
@@ -2387,6 +2512,7 @@ static bool isTypeArgument(const Record *Arg) {
/// Emits the first-argument-is-type property for attributes.
static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_TYPE_ARG_LIST)\n";
+ std::map<std::string, FSIVecTy> FSIMap;
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
for (const auto *Attr : Attrs) {
@@ -2397,15 +2523,9 @@ static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) {
if (!isTypeArgument(Args[0]))
continue;
-
- // All these spellings take a single type argument.
- forEachSpelling(*Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(*Attr, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_TYPE_ARG_LIST\n\n";
}
@@ -2413,21 +2533,16 @@ static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) {
/// attributes.
static void emitClangAttrArgContextList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_ARG_CONTEXT_LIST)\n";
+ std::map<std::string, FSIVecTy> FSIMap;
ParsedAttrMap Attrs = getParsedAttrList(Records);
for (const auto &I : Attrs) {
const Record &Attr = *I.second;
if (!Attr.getValueAsBit("ParseArgumentsAsUnevaluated"))
continue;
-
- // All these spellings take are parsed unevaluated.
- forEachSpelling(Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(Attr, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_ARG_CONTEXT_LIST\n\n";
}
@@ -2479,28 +2594,18 @@ static void emitClangAttrVariadicIdentifierArgList(RecordKeeper &Records,
raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST)\n";
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::map<std::string, FSIVecTy> FSIMap;
for (const auto *A : Attrs) {
// Determine whether the first argument is a variadic identifier.
std::vector<Record *> Args = A->getValueAsListOfDefs("Args");
if (Args.empty() || !isVariadicIdentifierArgument(Args[0]))
continue;
-
- // All these spellings take an identifier argument.
- forEachSpelling(*A, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(*A, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST\n\n";
}
-static bool GenerateTargetSpecificAttrChecks(const Record *R,
- std::vector<StringRef> &Arches,
- std::string &Test,
- std::string *FnName);
-
// Emits the list of arguments that should be parsed as unevaluated string
// literals for each attribute.
static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records,
@@ -2521,48 +2626,16 @@ static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records,
return Bits;
};
- auto AddMaskWithTargetCheck = [](const Record *Attr, uint32_t Mask,
- std::string &MaskStr) {
- const Record *T = Attr->getValueAsDef("Target");
- std::vector<StringRef> Arches = T->getValueAsListOfStrings("Arches");
- std::string Test;
- GenerateTargetSpecificAttrChecks(T, Arches, Test, nullptr);
- MaskStr.append(Test + " ? " + std::to_string(Mask) + " : ");
- };
-
- ParsedAttrMap Dupes;
- ParsedAttrMap Attrs = getParsedAttrList(Records, &Dupes, /*SemaOnly=*/false);
- for (const auto &[AttrName, Attr] : Attrs) {
- std::string MaskStr;
- if (Attr->isSubClassOf("TargetSpecificAttr") &&
- !Attr->isValueUnset("ParseKind")) {
- if (uint32_t Mask = MakeMask(Attr->getValueAsListOfDefs("Args")))
- AddMaskWithTargetCheck(Attr, Mask, MaskStr);
- StringRef ParseKind = Attr->getValueAsString("ParseKind");
- for (const auto &[DupeParseKind, DupAttr] : Dupes) {
- if (DupeParseKind != ParseKind)
- continue;
- if (uint32_t Mask = MakeMask(DupAttr->getValueAsListOfDefs("Args")))
- AddMaskWithTargetCheck(DupAttr, Mask, MaskStr);
- }
- if (!MaskStr.empty())
- MaskStr.append("0");
- } else {
- if (uint32_t Mask = MakeMask(Attr->getValueAsListOfDefs("Args")))
- MaskStr = std::to_string(Mask);
- }
-
- if (MaskStr.empty())
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::map<std::string, FSIVecTy> FSIMap;
+ for (const auto *Attr : Attrs) {
+ // Determine whether there are any string arguments.
+ uint32_t ArgMask = MakeMask(Attr->getValueAsListOfDefs("Args"));
+ if (!ArgMask)
continue;
-
- // All these spellings have at least one string literal has argument.
- forEachSpelling(*Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", " << MaskStr << ")\n";
- });
+ generateFlattenedSpellingInfo(*Attr, FSIMap, ArgMask);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_STRING_LITERAL_ARG_LIST\n\n";
}
@@ -2571,49 +2644,36 @@ static void emitClangAttrIdentifierArgList(RecordKeeper &Records, raw_ostream &O
OS << "#if defined(CLANG_ATTR_IDENTIFIER_ARG_LIST)\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::map<std::string, FSIVecTy> FSIMap;
for (const auto *Attr : Attrs) {
// Determine whether the first argument is an identifier.
std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args");
if (Args.empty() || !isIdentifierArgument(Args[0]))
continue;
-
- // All these spellings take an identifier argument.
- forEachSpelling(*Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(*Attr, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_IDENTIFIER_ARG_LIST\n\n";
}
-// Emits the indexed-argument-is-identifier property for attributes.
-static void emitClangAttrStrictIdentifierArgAtIndexList(RecordKeeper &Records,
- raw_ostream &OS) {
- OS << "#if defined(CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST)\n";
+// Emits the list for attributes having StrictEnumParameters.
+static void emitClangAttrStrictIdentifierArgList(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OS << "#if defined(CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST)\n";
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::map<std::string, FSIVecTy> FSIMap;
for (const auto *Attr : Attrs) {
if (!Attr->getValueAsBit("StrictEnumParameters"))
continue;
- // Determine whether each argument is an identifier.
+ // Check that there is really an identifier argument.
std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args");
- uint64_t enumAtIndex = 0;
- for (size_t I = 0; I < Args.size(); I++)
- enumAtIndex |= ((uint64_t)isIdentifierArgument(Args[I])) << I;
- if (!enumAtIndex)
+ if (llvm::none_of(Args, [&](Record *R) { return isIdentifierArgument(R); }))
continue;
-
- // All these spellings take an identifier argument.
- forEachSpelling(*Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", " << enumAtIndex << "ull)\n";
- });
+ generateFlattenedSpellingInfo(*Attr, FSIMap);
}
- OS << "#endif // CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST\n\n";
+ emitStringSwitchCases(FSIMap, OS);
+ OS << "#endif // CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST\n\n";
}
static bool keywordThisIsaIdentifierInArgument(const Record *Arg) {
@@ -2628,20 +2688,15 @@ static void emitClangAttrThisIsaIdentifierArgList(RecordKeeper &Records,
raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST)\n";
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::map<std::string, FSIVecTy> FSIMap;
for (const auto *A : Attrs) {
// Determine whether the first argument is a variadic identifier.
std::vector<Record *> Args = A->getValueAsListOfDefs("Args");
if (Args.empty() || !keywordThisIsaIdentifierInArgument(Args[0]))
continue;
-
- // All these spellings take an identifier argument.
- forEachSpelling(*A, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(*A, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST\n\n";
}
@@ -2649,19 +2704,15 @@ static void emitClangAttrAcceptsExprPack(RecordKeeper &Records,
raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_ACCEPTS_EXPR_PACK)\n";
ParsedAttrMap Attrs = getParsedAttrList(Records);
+ std::map<std::string, FSIVecTy> FSIMap;
for (const auto &I : Attrs) {
const Record &Attr = *I.second;
if (!Attr.getValueAsBit("AcceptsExprPack"))
continue;
-
- forEachSpelling(Attr, [&](const FlattenedSpelling &S) {
- OS << ".Case(\"" << S.variety();
- if (S.nameSpace().length())
- OS << "::" << S.nameSpace();
- OS << "::" << S.name() << "\", true)\n";
- });
+ generateFlattenedSpellingInfo(Attr, FSIMap);
}
+ emitStringSwitchCases(FSIMap, OS);
OS << "#endif // CLANG_ATTR_ACCEPTS_EXPR_PACK\n\n";
}
@@ -4991,6 +5042,7 @@ void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) {
}
void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS) {
+ generateNameToAttrsMap(Records);
emitSourceFileHeader("Parser-related llvm::StringSwitch cases", OS, Records);
emitClangAttrArgContextList(Records, OS);
emitClangAttrIdentifierArgList(Records, OS);
@@ -5001,7 +5053,7 @@ void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS) {
emitClangAttrTypeArgList(Records, OS);
emitClangAttrLateParsedList(Records, OS);
emitClangAttrLateParsedExperimentalList(Records, OS);
- emitClangAttrStrictIdentifierArgAtIndexList(Records, OS);
+ emitClangAttrStrictIdentifierArgList(Records, OS);
}
void EmitClangAttrSubjectMatchRulesParserStringSwitches(RecordKeeper &Records,
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 7f3cb70..ef7159fa 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -670,6 +670,7 @@ void RVVEmitter::createRVVIntrinsics(
.Case("Zvksh", RVV_REQ_Zvksh)
.Case("Zvfbfwma", RVV_REQ_Zvfbfwma)
.Case("Zvfbfmin", RVV_REQ_Zvfbfmin)
+ .Case("Zvfh", RVV_REQ_Zvfh)
.Case("Experimental", RVV_REQ_Experimental)
.Default(RVV_REQ_None);
assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
diff --git a/compiler-rt/lib/asan/asan_errors.cpp b/compiler-rt/lib/asan/asan_errors.cpp
index 26eabf2..6f2fd28 100644
--- a/compiler-rt/lib/asan/asan_errors.cpp
+++ b/compiler-rt/lib/asan/asan_errors.cpp
@@ -327,7 +327,6 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
" old_mid : %p\n"
" new_mid : %p\n",
(void *)beg, (void *)end, (void *)old_mid, (void *)new_mid);
- uptr granularity = ASAN_SHADOW_GRANULARITY;
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
@@ -345,7 +344,6 @@ void ErrorBadParamsToAnnotateDoubleEndedContiguousContainer::Print() {
(void *)storage_beg, (void *)storage_end, (void *)old_container_beg,
(void *)old_container_end, (void *)new_container_beg,
(void *)new_container_end);
- uptr granularity = ASAN_SHADOW_GRANULARITY;
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
diff --git a/compiler-rt/lib/builtins/crtbegin.c b/compiler-rt/lib/builtins/crtbegin.c
index a0860ca..d5f7756 100644
--- a/compiler-rt/lib/builtins/crtbegin.c
+++ b/compiler-rt/lib/builtins/crtbegin.c
@@ -8,6 +8,14 @@
#include <stddef.h>
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#if __has_feature(ptrauth_init_fini)
+#include <ptrauth.h>
+#endif
+
__attribute__((visibility("hidden"))) void *__dso_handle = &__dso_handle;
#ifdef EH_USE_FRAME_REGISTRY
@@ -46,8 +54,22 @@ static void __attribute__((used)) __do_init(void) {
}
#ifdef CRT_HAS_INITFINI_ARRAY
+#if __has_feature(ptrauth_init_fini)
+// TODO: use __ptrauth-qualified pointers when they are supported on clang side
+#if __has_feature(ptrauth_init_fini_address_discrimination)
+__attribute__((section(".init_array"), used)) static void *__init =
+ ptrauth_sign_constant(&__do_init, ptrauth_key_init_fini_pointer,
+ ptrauth_blend_discriminator(
+ &__init, __ptrauth_init_fini_discriminator));
+#else
+__attribute__((section(".init_array"), used)) static void *__init =
+ ptrauth_sign_constant(&__do_init, ptrauth_key_init_fini_pointer,
+ __ptrauth_init_fini_discriminator);
+#endif
+#else
__attribute__((section(".init_array"),
used)) static void (*__init)(void) = __do_init;
+#endif
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
"call __do_init\n\t"
@@ -103,8 +125,22 @@ static void __attribute__((used)) __do_fini(void) {
}
#ifdef CRT_HAS_INITFINI_ARRAY
+#if __has_feature(ptrauth_init_fini)
+// TODO: use __ptrauth-qualified pointers when they are supported on clang side
+#if __has_feature(ptrauth_init_fini_address_discrimination)
+__attribute__((section(".fini_array"), used)) static void *__fini =
+ ptrauth_sign_constant(&__do_fini, ptrauth_key_init_fini_pointer,
+ ptrauth_blend_discriminator(
+ &__fini, __ptrauth_init_fini_discriminator));
+#else
+__attribute__((section(".fini_array"), used)) static void *__fini =
+ ptrauth_sign_constant(&__do_fini, ptrauth_key_init_fini_pointer,
+ __ptrauth_init_fini_discriminator);
+#endif
+#else
__attribute__((section(".fini_array"),
used)) static void (*__fini)(void) = __do_fini;
+#endif
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
"call __do_fini\n\t"
diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
index 8fc245e..dc700ce 100644
--- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -246,7 +246,6 @@ if(COMPILER_RT_SCUDO_STANDALONE_BUILD_SHARED)
PARENT_TARGET scudo_standalone)
endif()
-add_subdirectory(benchmarks)
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()
diff --git a/compiler-rt/lib/scudo/standalone/benchmarks/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/benchmarks/CMakeLists.txt
deleted file mode 100644
index 26d023c..0000000
--- a/compiler-rt/lib/scudo/standalone/benchmarks/CMakeLists.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# To build these benchmarks, build the target "ScudoBenchmarks.$ARCH", where
-# $ARCH is the name of the target architecture. For example,
-# ScudoBenchmarks.x86_64 for 64-bit x86. The benchmark executable is then
-# available under projects/compiler-rt/lib/scudo/standalone/benchmarks/ in the
-# build directory.
-
-include(AddLLVM)
-
-set(SCUDO_BENCHMARK_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone)
-if(ANDROID)
- list(APPEND SCUDO_BENCHMARK_CFLAGS -fno-emulated-tls)
-endif()
-string(REPLACE ";" " " SCUDO_BENCHMARK_CFLAGS " ${SCUDO_BENCHMARK_CFLAGS}")
-
-foreach(arch ${SCUDO_STANDALONE_SUPPORTED_ARCH})
- add_benchmark(ScudoBenchmarks.${arch}
- malloc_benchmark.cpp
- $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
- set_property(TARGET ScudoBenchmarks.${arch} APPEND_STRING PROPERTY
- COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS}")
-
- if (COMPILER_RT_HAS_GWP_ASAN)
- add_benchmark(
- ScudoBenchmarksWithGwpAsan.${arch} malloc_benchmark.cpp
- $<TARGET_OBJECTS:RTScudoStandalone.${arch}>
- $<TARGET_OBJECTS:RTGwpAsan.${arch}>
- $<TARGET_OBJECTS:RTGwpAsanBacktraceLibc.${arch}>
- $<TARGET_OBJECTS:RTGwpAsanSegvHandler.${arch}>)
- set_property(
- TARGET ScudoBenchmarksWithGwpAsan.${arch} APPEND_STRING PROPERTY
- COMPILE_FLAGS "${SCUDO_BENCHMARK_CFLAGS} -DGWP_ASAN_HOOKS")
- endif()
-endforeach()
diff --git a/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp b/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp
deleted file mode 100644
index 4fb05b7..0000000
--- a/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-//===-- malloc_benchmark.cpp ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "allocator_config.h"
-#include "combined.h"
-#include "common.h"
-
-#include "benchmark/benchmark.h"
-
-#include <memory>
-#include <vector>
-
-void *CurrentAllocator;
-template <typename Config> void PostInitCallback() {
- reinterpret_cast<scudo::Allocator<Config> *>(CurrentAllocator)->initGwpAsan();
-}
-
-template <typename Config> static void BM_malloc_free(benchmark::State &State) {
- using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
- auto Deleter = [](AllocatorT *A) {
- A->unmapTestOnly();
- delete A;
- };
- std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
- Deleter);
- CurrentAllocator = Allocator.get();
-
- const size_t NBytes = State.range(0);
- size_t PageSize = scudo::getPageSizeCached();
-
- for (auto _ : State) {
- void *Ptr = Allocator->allocate(NBytes, scudo::Chunk::Origin::Malloc);
- auto *Data = reinterpret_cast<uint8_t *>(Ptr);
- for (size_t I = 0; I < NBytes; I += PageSize)
- Data[I] = 1;
- benchmark::DoNotOptimize(Ptr);
- Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
- }
-
- State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NBytes));
-}
-
-static const size_t MinSize = 8;
-static const size_t MaxSize = 128 * 1024;
-
-// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
-// cleanly.
-BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidConfig)
- ->Range(MinSize, MaxSize);
-#if SCUDO_CAN_USE_PRIMARY64
-BENCHMARK_TEMPLATE(BM_malloc_free, scudo::FuchsiaConfig)
- ->Range(MinSize, MaxSize);
-#endif
-
-template <typename Config>
-static void BM_malloc_free_loop(benchmark::State &State) {
- using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
- auto Deleter = [](AllocatorT *A) {
- A->unmapTestOnly();
- delete A;
- };
- std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
- Deleter);
- CurrentAllocator = Allocator.get();
-
- const size_t NumIters = State.range(0);
- size_t PageSize = scudo::getPageSizeCached();
- std::vector<void *> Ptrs(NumIters);
-
- for (auto _ : State) {
- size_t SizeLog2 = 0;
- for (void *&Ptr : Ptrs) {
- Ptr = Allocator->allocate(1 << SizeLog2, scudo::Chunk::Origin::Malloc);
- auto *Data = reinterpret_cast<uint8_t *>(Ptr);
- for (size_t I = 0; I < 1 << SizeLog2; I += PageSize)
- Data[I] = 1;
- benchmark::DoNotOptimize(Ptr);
- SizeLog2 = (SizeLog2 + 1) % 16;
- }
- for (void *&Ptr : Ptrs)
- Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
- }
-
- State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NumIters) *
- 8192);
-}
-
-static const size_t MinIters = 8;
-static const size_t MaxIters = 32 * 1024;
-
-// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
-// cleanly.
-BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidConfig)
- ->Range(MinIters, MaxIters);
-#if SCUDO_CAN_USE_PRIMARY64
-BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::FuchsiaConfig)
- ->Range(MinIters, MaxIters);
-#endif
-
-BENCHMARK_MAIN();
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index fcf6565..9c26282e 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -1706,14 +1706,12 @@ private:
return;
// N.B. because RawStackDepotMap is part of RawRingBufferMap, the order
// is very important.
- RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
- RB->RawStackDepotMap.getCapacity());
+ RB->RawStackDepotMap.unmap();
// Note that the `RB->RawRingBufferMap` is stored on the pages managed by
// itself. Take over the ownership before calling unmap() so that any
// operation along with unmap() won't touch inaccessible pages.
MemMapT RawRingBufferMap = RB->RawRingBufferMap;
- RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
- RawRingBufferMap.getCapacity());
+ RawRingBufferMap.unmap();
atomic_store(&RingBufferAddress, 0, memory_order_release);
}
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_base.h b/compiler-rt/lib/scudo/standalone/mem_map_base.h
index 99ab0cb..dbf4ec3 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_base.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -35,6 +35,8 @@ public:
DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
invokeImpl(&Derived::unmapImpl, Addr, Size);
}
+ // A default implementation to unmap all pages.
+ void unmap() { unmap(getBase(), getCapacity()); }
// This is used to remap a mapped range (either from map() or dispatched from
// ReservedMemory). For example, we have reserved several pages and then we
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 8a583ba..8436f33 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -160,7 +160,7 @@ public:
ScopedLock ML(Region->MMLock);
MemMapT MemMap = Region->MemMapInfo.MemMap;
if (MemMap.isAllocated())
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
}
*Region = {};
}
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index b6f76a4..69f926e 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -158,7 +158,7 @@ public:
DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
} else {
- Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
+ Buf.MemMap.unmap();
}
}
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index d850574..a9a7c2c8 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -65,13 +65,7 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
} // namespace LargeBlock
-static inline void unmap(LargeBlock::Header *H) {
- // Note that the `H->MapMap` is stored on the pages managed by itself. Take
- // over the ownership before unmap() so that any operation along with unmap()
- // won't touch inaccessible pages.
- MemMapT MemMap = H->MemMap;
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
-}
+static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); }
namespace {
@@ -96,12 +90,15 @@ struct CachedBlock {
template <typename Config> class MapAllocatorNoCache {
public:
void init(UNUSED s32 ReleaseToOsInterval) {}
- bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
- UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
- UNUSED bool *Zeroed) {
- return false;
+ CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
+ UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
+ return {};
+ }
+ void store(UNUSED Options Options, UNUSED uptr CommitBase,
+ UNUSED uptr CommitSize, UNUSED uptr BlockBegin, MemMapT MemMap) {
+ unmap(MemMap);
}
- void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
+
bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
@@ -239,19 +236,19 @@ public:
Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
}
- void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
- if (!canCache(H->CommitSize))
- return unmap(H);
+ void store(const Options &Options, uptr CommitBase, uptr CommitSize,
+ uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
+ DCHECK(canCache(CommitSize));
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
u64 Time;
CachedBlock Entry;
-
- Entry.CommitBase = H->CommitBase;
- Entry.CommitSize = H->CommitSize;
- Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
- Entry.MemMap = H->MemMap;
+ Entry.CommitBase = CommitBase;
+ Entry.CommitSize = CommitSize;
+ Entry.BlockBegin = BlockBegin;
+ Entry.MemMap = MemMap;
Entry.Time = UINT64_MAX;
+
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
// Release the memory and make it inaccessible at the same time by
@@ -290,7 +287,7 @@ public:
// read Options and when we locked Mutex. We can't insert our entry into
// the quarantine or the cache because the permissions would be wrong so
// just unmap it.
- Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+ unmap(Entry.MemMap);
break;
}
if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -321,7 +318,7 @@ public:
} while (0);
for (MemMapT &EvictMemMap : EvictionMemMaps)
- EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+ unmap(EvictMemMap);
if (Interval >= 0) {
// TODO: Add ReleaseToOS logic to LRU algorithm
@@ -329,20 +326,20 @@ public:
}
}
- bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
- LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+ CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
+ uptr &EntryHeaderPos) EXCLUDES(Mutex) {
const uptr PageSize = getPageSizeCached();
// 10% of the requested size proved to be the optimal choice for
// retrieving cached blocks after testing several options.
constexpr u32 FragmentedBytesDivisor = 10;
bool Found = false;
CachedBlock Entry;
- uptr EntryHeaderPos = 0;
+ EntryHeaderPos = 0;
{
ScopedLock L(Mutex);
CallsToRetrieve++;
if (EntriesCount == 0)
- return false;
+ return {};
u32 OptimalFitIndex = 0;
uptr MinDiff = UINTPTR_MAX;
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
@@ -383,29 +380,8 @@ public:
SuccessfulRetrieves++;
}
}
- if (!Found)
- return false;
- *H = reinterpret_cast<LargeBlock::Header *>(
- LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
- *Zeroed = Entry.Time == 0;
- if (useMemoryTagging<Config>(Options))
- Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
- uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
- if (useMemoryTagging<Config>(Options)) {
- if (*Zeroed) {
- storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
- NewBlockBegin);
- } else if (Entry.BlockBegin < NewBlockBegin) {
- storeTags(Entry.BlockBegin, NewBlockBegin);
- } else {
- storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
- }
- }
- (*H)->CommitBase = Entry.CommitBase;
- (*H)->CommitSize = Entry.CommitSize;
- (*H)->MemMap = Entry.MemMap;
- return true;
+ return Entry;
}
bool canCache(uptr Size) {
@@ -444,7 +420,7 @@ public:
for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
if (Quarantine[I].isValid()) {
MemMapT &MemMap = Quarantine[I].MemMap;
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ unmap(MemMap);
Quarantine[I].invalidate();
}
}
@@ -538,7 +514,7 @@ private:
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ unmap(MemMap);
}
}
@@ -605,6 +581,9 @@ public:
void deallocate(const Options &Options, void *Ptr);
+ void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
+ uptr *BlockEndPtr, FillContentsMode FillContents);
+
static uptr getBlockEnd(void *Ptr) {
auto *B = LargeBlock::getHeader<Config>(Ptr);
return B->CommitBase + B->CommitSize;
@@ -665,6 +644,60 @@ private:
LocalStats Stats GUARDED_BY(Mutex);
};
+template <typename Config>
+void *
+MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
+ uptr Alignment, uptr *BlockEndPtr,
+ FillContentsMode FillContents) {
+ CachedBlock Entry;
+ uptr EntryHeaderPos;
+
+ Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
+ if (!Entry.isValid())
+ return nullptr;
+
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+ bool Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options)) {
+ uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ if (Zeroed) {
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ } else if (Entry.BlockBegin < NewBlockBegin) {
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ } else {
+ storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+ }
+ }
+
+ H->CommitBase = Entry.CommitBase;
+ H->CommitSize = Entry.CommitSize;
+ H->MemMap = Entry.MemMap;
+
+ const uptr BlockEnd = H->CommitBase + H->CommitSize;
+ if (BlockEndPtr)
+ *BlockEndPtr = BlockEnd;
+ uptr HInt = reinterpret_cast<uptr>(H);
+ if (allocatorSupportsMemoryTagging<Config>())
+ HInt = untagPointer(HInt);
+ const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+ void *Ptr = reinterpret_cast<void *>(PtrInt);
+ if (FillContents && !Zeroed)
+ memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
+ BlockEnd - PtrInt);
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.push_back(H);
+ AllocatedBytes += H->CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, H->CommitSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
+ }
+ return Ptr;
+}
// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
@@ -690,32 +723,10 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
- LargeBlock::Header *H;
- bool Zeroed;
- if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
- &Zeroed)) {
- const uptr BlockEnd = H->CommitBase + H->CommitSize;
- if (BlockEndPtr)
- *BlockEndPtr = BlockEnd;
- uptr HInt = reinterpret_cast<uptr>(H);
- if (allocatorSupportsMemoryTagging<Config>())
- HInt = untagPointer(HInt);
- const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
- void *Ptr = reinterpret_cast<void *>(PtrInt);
- if (FillContents && !Zeroed)
- memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
- BlockEnd - PtrInt);
- {
- ScopedLock L(Mutex);
- InUseBlocks.push_back(H);
- AllocatedBytes += H->CommitSize;
- FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
- NumberOfAllocs++;
- Stats.add(StatAllocated, H->CommitSize);
- Stats.add(StatMapped, H->MemMap.getCapacity());
- }
+ void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
+ FillContents);
+ if (Ptr != nullptr)
return Ptr;
- }
}
uptr RoundedSize =
@@ -740,9 +751,9 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
// In the unlikely event of alignments larger than a page, adjust the amount
// of memory we want to commit, and trim the extra memory.
if (UNLIKELY(Alignment >= PageSize)) {
- // For alignments greater than or equal to a page, the user pointer (eg: the
- // pointer that is returned by the C or C++ allocation APIs) ends up on a
- // page boundary , and our headers will live in the preceding page.
+ // For alignments greater than or equal to a page, the user pointer (eg:
+ // the pointer that is returned by the C or C++ allocation APIs) ends up
+ // on a page boundary , and our headers will live in the preceding page.
CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
@@ -765,7 +776,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
MemMap)) {
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ unmap(MemMap);
return nullptr;
}
const uptr HeaderPos = AllocPos - getHeadersSize();
@@ -807,7 +818,13 @@ void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
Stats.sub(StatAllocated, CommitSize);
Stats.sub(StatMapped, H->MemMap.getCapacity());
}
- Cache.store(Options, H);
+
+ if (Cache.canCache(H->CommitSize)) {
+ Cache.store(Options, H->CommitBase, H->CommitSize,
+ reinterpret_cast<uptr>(H + 1), H->MemMap);
+ } else {
+ unmap(H->MemMap);
+ }
}
template <typename Config>
diff --git a/compiler-rt/lib/scudo/standalone/tests/common_test.cpp b/compiler-rt/lib/scudo/standalone/tests/common_test.cpp
index fff7c66..e6ddbb0 100644
--- a/compiler-rt/lib/scudo/standalone/tests/common_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/common_test.cpp
@@ -50,7 +50,7 @@ TEST(ScudoCommonTest, SKIP_ON_FUCHSIA(ResidentMemorySize)) {
memset(P, 1, Size);
EXPECT_GT(getResidentMemorySize(), OnStart + Size - Threshold);
- MemMap.unmap(MemMap.getBase(), Size);
+ MemMap.unmap();
}
TEST(ScudoCommonTest, Zeros) {
@@ -69,7 +69,7 @@ TEST(ScudoCommonTest, Zeros) {
MemMap.releasePagesToOS(MemMap.getBase(), Size);
EXPECT_EQ(std::count(P, P + N, 0), N);
- MemMap.unmap(MemMap.getBase(), Size);
+ MemMap.unmap();
}
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/tests/map_test.cpp b/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
index 06a56f8..cc7d3ee 100644
--- a/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/map_test.cpp
@@ -46,7 +46,7 @@ TEST(ScudoMapDeathTest, MapUnmap) {
scudo::uptr P = MemMap.getBase();
if (P == 0U)
continue;
- MemMap.unmap(MemMap.getBase(), Size);
+ MemMap.unmap();
memset(reinterpret_cast<void *>(P), 0xbb, Size);
}
},
@@ -68,7 +68,7 @@ TEST(ScudoMapDeathTest, MapWithGuardUnmap) {
ASSERT_TRUE(MemMap.remap(Q, Size, MappingName));
memset(reinterpret_cast<void *>(Q), 0xaa, Size);
EXPECT_DEATH(memset(reinterpret_cast<void *>(Q), 0xaa, Size + 1), "");
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
}
TEST(ScudoMapTest, MapGrowUnmap) {
@@ -87,5 +87,5 @@ TEST(ScudoMapTest, MapGrowUnmap) {
Q += PageSize;
ASSERT_TRUE(MemMap.remap(Q, PageSize, MappingName));
memset(reinterpret_cast<void *>(Q), 0xbb, PageSize);
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
}
diff --git a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
index 0613847..1fae651 100644
--- a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
@@ -63,7 +63,7 @@ protected:
void TearDown() override {
if (Buffer) {
ASSERT_TRUE(MemMap.isAllocated());
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
}
}
diff --git a/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp b/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
index 2c0916d..f81e503 100644
--- a/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
@@ -145,7 +145,7 @@ TEST(ScudoStringsTest, CapacityIncreaseFails) {
scudo::MemMapT MemMap;
if (MemMap.map(/*Addr=*/0U, scudo::getPageSizeCached(), "scudo:test",
MAP_ALLOWNOMEM)) {
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
setrlimit(RLIMIT_AS, &Limit);
TEST_SKIP("Limiting address space does not prevent mmap.");
}
diff --git a/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp b/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
index a972d24..cec8f46 100644
--- a/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
@@ -62,7 +62,7 @@ TEST(ScudoVectorTest, ReallocateFails) {
scudo::MemMapT MemMap;
if (MemMap.map(/*Addr=*/0U, scudo::getPageSizeCached(), "scudo:test",
MAP_ALLOWNOMEM)) {
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ MemMap.unmap();
setrlimit(RLIMIT_AS, &Limit);
TEST_SKIP("Limiting address space does not prevent mmap.");
}
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index 98b3db4..0d059ba 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -86,8 +86,7 @@ protected:
}
void destroy() {
if (Data != &LocalData[0])
- ExternalBuffer.unmap(ExternalBuffer.getBase(),
- ExternalBuffer.getCapacity());
+ ExternalBuffer.unmap();
}
private:
diff --git a/flang/include/flang/Runtime/CUDA/allocator.h b/flang/include/flang/Runtime/CUDA/allocator.h
index 46ff5db..8f52047 100644
--- a/flang/include/flang/Runtime/CUDA/allocator.h
+++ b/flang/include/flang/Runtime/CUDA/allocator.h
@@ -23,7 +23,7 @@
terminator.Crash("'%s' failed with '%s'", #expr, name); \
}(expr)
-namespace Fortran::runtime::cuf {
+namespace Fortran::runtime::cuda {
void CUFRegisterAllocator();
@@ -36,5 +36,8 @@ void CUFFreeDevice(void *);
void *CUFAllocManaged(std::size_t);
void CUFFreeManaged(void *);
-} // namespace Fortran::runtime::cuf
+void *CUFAllocUnified(std::size_t);
+void CUFFreeUnified(void *);
+
+} // namespace Fortran::runtime::cuda
#endif // FORTRAN_RUNTIME_CUDA_ALLOCATOR_H_
diff --git a/flang/include/flang/Runtime/CUDA/descriptor.h b/flang/include/flang/Runtime/CUDA/descriptor.h
new file mode 100644
index 0000000..d593989
--- /dev/null
+++ b/flang/include/flang/Runtime/CUDA/descriptor.h
@@ -0,0 +1,30 @@
+//===-- include/flang/Runtime/CUDA/descriptor.h -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_RUNTIME_CUDA_DESCRIPTOR_H_
+#define FORTRAN_RUNTIME_CUDA_DESCRIPTOR_H_
+
+#include "flang/Runtime/descriptor.h"
+#include "flang/Runtime/entry-names.h"
+#include <cstddef>
+
+namespace Fortran::runtime::cuda {
+
+extern "C" {
+
+// Allocate a descriptor in managed.
+Descriptor *RTDECL(CUFAllocDesciptor)(
+ std::size_t, const char *sourceFile = nullptr, int sourceLine = 0);
+
+// Deallocate a descriptor allocated in managed or unified memory.
+void RTDECL(CUFFreeDesciptor)(
+ Descriptor *, const char *sourceFile = nullptr, int sourceLine = 0);
+
+} // extern "C"
+} // namespace Fortran::runtime::cuda
+#endif // FORTRAN_RUNTIME_CUDA_DESCRIPTOR_H_
diff --git a/flang/include/flang/Runtime/allocator-registry.h b/flang/include/flang/Runtime/allocator-registry.h
index 209b4d2..acfada5 100644
--- a/flang/include/flang/Runtime/allocator-registry.h
+++ b/flang/include/flang/Runtime/allocator-registry.h
@@ -19,8 +19,9 @@ static constexpr unsigned kDefaultAllocator = 0;
static constexpr unsigned kPinnedAllocatorPos = 1;
static constexpr unsigned kDeviceAllocatorPos = 2;
static constexpr unsigned kManagedAllocatorPos = 3;
+static constexpr unsigned kUnifiedAllocatorPos = 4;
-#define MAX_ALLOCATOR 5
+#define MAX_ALLOCATOR 7 // 3 bits are reserved in the descriptor.
namespace Fortran::runtime {
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index 4538909..ffbbea2 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -1860,9 +1860,10 @@ static unsigned getAllocatorIdx(const Fortran::semantics::Symbol &sym) {
return kPinnedAllocatorPos;
if (*cudaAttr == Fortran::common::CUDADataAttr::Device)
return kDeviceAllocatorPos;
- if (*cudaAttr == Fortran::common::CUDADataAttr::Managed ||
- *cudaAttr == Fortran::common::CUDADataAttr::Unified)
+ if (*cudaAttr == Fortran::common::CUDADataAttr::Managed)
return kManagedAllocatorPos;
+ if (*cudaAttr == Fortran::common::CUDADataAttr::Unified)
+ return kUnifiedAllocatorPos;
}
return kDefaultAllocator;
}
diff --git a/flang/lib/Lower/DirectivesCommon.h b/flang/lib/Lower/DirectivesCommon.h
index 24cb7c2..d8b1f1f 100644
--- a/flang/lib/Lower/DirectivesCommon.h
+++ b/flang/lib/Lower/DirectivesCommon.h
@@ -58,9 +58,20 @@ struct AddrAndBoundsInfo {
explicit AddrAndBoundsInfo(mlir::Value addr, mlir::Value rawInput,
mlir::Value isPresent)
: addr(addr), rawInput(rawInput), isPresent(isPresent) {}
+ explicit AddrAndBoundsInfo(mlir::Value addr, mlir::Value rawInput,
+ mlir::Value isPresent, mlir::Type boxType)
+ : addr(addr), rawInput(rawInput), isPresent(isPresent), boxType(boxType) {
+ }
mlir::Value addr = nullptr;
mlir::Value rawInput = nullptr;
mlir::Value isPresent = nullptr;
+ mlir::Type boxType = nullptr;
+ void dump(llvm::raw_ostream &os) {
+ os << "AddrAndBoundsInfo addr: " << addr << "\n";
+ os << "AddrAndBoundsInfo rawInput: " << rawInput << "\n";
+ os << "AddrAndBoundsInfo isPresent: " << isPresent << "\n";
+ os << "AddrAndBoundsInfo boxType: " << boxType << "\n";
+ }
};
/// Checks if the assignment statement has a single variable on the RHS.
@@ -674,27 +685,18 @@ getDataOperandBaseAddr(Fortran::lower::AbstractConverter &converter,
if (mlir::isa<fir::RecordType>(boxTy.getEleTy()))
TODO(loc, "derived type");
- // Load the box when baseAddr is a `fir.ref<fir.box<T>>` or a
- // `fir.ref<fir.class<T>>` type.
- if (mlir::isa<fir::ReferenceType>(symAddr.getType())) {
- if (Fortran::semantics::IsOptional(sym)) {
- mlir::Value addr =
- builder.genIfOp(loc, {boxTy}, isPresent, /*withElseRegion=*/true)
- .genThen([&]() {
- mlir::Value load = builder.create<fir::LoadOp>(loc, symAddr);
- builder.create<fir::ResultOp>(loc, mlir::ValueRange{load});
- })
- .genElse([&] {
- mlir::Value absent =
- builder.create<fir::AbsentOp>(loc, boxTy);
- builder.create<fir::ResultOp>(loc, mlir::ValueRange{absent});
- })
- .getResults()[0];
- return AddrAndBoundsInfo(addr, rawInput, isPresent);
- }
+ // In case of a box reference, load it here to get the box value.
+ // This is preferrable because then the same box value can then be used for
+ // all address/dimension retrievals. For Fortran optional though, leave
+ // the load generation for later so it can be done in the appropriate
+ // if branches.
+ if (mlir::isa<fir::ReferenceType>(symAddr.getType()) &&
+ !Fortran::semantics::IsOptional(sym)) {
mlir::Value addr = builder.create<fir::LoadOp>(loc, symAddr);
- return AddrAndBoundsInfo(addr, rawInput, isPresent);
+ return AddrAndBoundsInfo(addr, rawInput, isPresent, boxTy);
}
+
+ return AddrAndBoundsInfo(symAddr, rawInput, isPresent, boxTy);
}
return AddrAndBoundsInfo(symAddr, rawInput, isPresent);
}
@@ -704,6 +706,7 @@ llvm::SmallVector<mlir::Value>
gatherBoundsOrBoundValues(fir::FirOpBuilder &builder, mlir::Location loc,
fir::ExtendedValue dataExv, mlir::Value box,
bool collectValuesOnly = false) {
+ assert(box && "box must exist");
llvm::SmallVector<mlir::Value> values;
mlir::Value byteStride;
mlir::Type idxTy = builder.getIndexType();
@@ -748,8 +751,10 @@ genBoundsOpsFromBox(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Type idxTy = builder.getIndexType();
mlir::Type boundTy = builder.getType<BoundsType>();
- assert(mlir::isa<fir::BaseBoxType>(info.addr.getType()) &&
+ assert(mlir::isa<fir::BaseBoxType>(info.boxType) &&
"expect fir.box or fir.class");
+ assert(fir::unwrapRefType(info.addr.getType()) == info.boxType &&
+ "expected box type consistency");
if (info.isPresent) {
llvm::SmallVector<mlir::Type> resTypes;
@@ -760,9 +765,13 @@ genBoundsOpsFromBox(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Operation::result_range ifRes =
builder.genIfOp(loc, resTypes, info.isPresent, /*withElseRegion=*/true)
.genThen([&]() {
+ mlir::Value box =
+ !fir::isBoxAddress(info.addr.getType())
+ ? info.addr
+ : builder.create<fir::LoadOp>(loc, info.addr);
llvm::SmallVector<mlir::Value> boundValues =
gatherBoundsOrBoundValues<BoundsOp, BoundsType>(
- builder, loc, dataExv, info.addr,
+ builder, loc, dataExv, box,
/*collectValuesOnly=*/true);
builder.create<fir::ResultOp>(loc, boundValues);
})
@@ -790,8 +799,11 @@ genBoundsOpsFromBox(fir::FirOpBuilder &builder, mlir::Location loc,
bounds.push_back(bound);
}
} else {
- bounds = gatherBoundsOrBoundValues<BoundsOp, BoundsType>(
- builder, loc, dataExv, info.addr);
+ mlir::Value box = !fir::isBoxAddress(info.addr.getType())
+ ? info.addr
+ : builder.create<fir::LoadOp>(loc, info.addr);
+ bounds = gatherBoundsOrBoundValues<BoundsOp, BoundsType>(builder, loc,
+ dataExv, box);
}
return bounds;
}
@@ -941,10 +953,14 @@ genBoundsOps(fir::FirOpBuilder &builder, mlir::Location loc,
builder
.genIfOp(loc, idxTy, info.isPresent, /*withElseRegion=*/true)
.genThen([&]() {
+ mlir::Value box =
+ !fir::isBoxAddress(info.addr.getType())
+ ? info.addr
+ : builder.create<fir::LoadOp>(loc, info.addr);
mlir::Value d =
builder.createIntegerConstant(loc, idxTy, dimension);
auto dimInfo = builder.create<fir::BoxDimsOp>(
- loc, idxTy, idxTy, idxTy, info.addr, d);
+ loc, idxTy, idxTy, idxTy, box, d);
builder.create<fir::ResultOp>(loc, dimInfo.getByteStride());
})
.genElse([&] {
@@ -954,9 +970,12 @@ genBoundsOps(fir::FirOpBuilder &builder, mlir::Location loc,
})
.getResults()[0];
} else {
+ mlir::Value box = !fir::isBoxAddress(info.addr.getType())
+ ? info.addr
+ : builder.create<fir::LoadOp>(loc, info.addr);
mlir::Value d = builder.createIntegerConstant(loc, idxTy, dimension);
- auto dimInfo = builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy,
- idxTy, info.addr, d);
+ auto dimInfo =
+ builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, box, d);
stride = dimInfo.getByteStride();
}
strideInBytes = true;
@@ -1197,8 +1216,10 @@ AddrAndBoundsInfo gatherDataOperandAddrAndBounds(
if (auto loadOp =
mlir::dyn_cast_or_null<fir::LoadOp>(info.addr.getDefiningOp())) {
if (fir::isAllocatableType(loadOp.getType()) ||
- fir::isPointerType(loadOp.getType()))
+ fir::isPointerType(loadOp.getType())) {
+ info.boxType = info.addr.getType();
info.addr = builder.create<fir::BoxAddrOp>(operandLocation, info.addr);
+ }
info.rawInput = info.addr;
}
@@ -1209,6 +1230,7 @@ AddrAndBoundsInfo gatherDataOperandAddrAndBounds(
if (auto boxAddrOp =
mlir::dyn_cast_or_null<fir::BoxAddrOp>(info.addr.getDefiningOp())) {
info.addr = boxAddrOp.getVal();
+ info.boxType = info.addr.getType();
info.rawInput = info.addr;
bounds = genBoundsOpsFromBox<BoundsOp, BoundsType>(
builder, operandLocation, compExv, info);
@@ -1227,6 +1249,7 @@ AddrAndBoundsInfo gatherDataOperandAddrAndBounds(
getDataOperandBaseAddr(converter, builder, *symRef, operandLocation);
if (mlir::isa<fir::BaseBoxType>(
fir::unwrapRefType(info.addr.getType()))) {
+ info.boxType = fir::unwrapRefType(info.addr.getType());
bounds = genBoundsOpsFromBox<BoundsOp, BoundsType>(
builder, operandLocation, dataExv, info);
}
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 6266a50..be184ae 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -32,6 +32,9 @@
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "llvm/Frontend/OpenACC/ACC.h.inc"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "flang-lower-openacc"
// Special value for * passed in device_type or gang clauses.
static constexpr std::int64_t starCst = -1;
@@ -85,11 +88,17 @@ createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Type retTy, llvm::ArrayRef<mlir::Value> async,
llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
- mlir::Value isPresent = {}) {
+ bool unwrapBoxAddr = false, mlir::Value isPresent = {}) {
mlir::Value varPtrPtr;
- if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(baseAddr.getType())) {
+ // The data clause may apply to either the box reference itself or the
+ // pointer to the data it holds. So use `unwrapBoxAddr` to decide.
+ // When we have a box value - assume it refers to the data inside box.
+ if ((fir::isBoxAddress(baseAddr.getType()) && unwrapBoxAddr) ||
+ fir::isa_box_type(baseAddr.getType())) {
if (isPresent) {
- mlir::Type ifRetTy = boxTy.getEleTy();
+ mlir::Type ifRetTy =
+ mlir::cast<fir::BaseBoxType>(fir::unwrapRefType(baseAddr.getType()))
+ .getEleTy();
if (!fir::isa_ref_type(ifRetTy))
ifRetTy = fir::ReferenceType::get(ifRetTy);
baseAddr =
@@ -97,6 +106,8 @@ createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
.genIfOp(loc, {ifRetTy}, isPresent,
/*withElseRegion=*/true)
.genThen([&]() {
+ if (fir::isBoxAddress(baseAddr.getType()))
+ baseAddr = builder.create<fir::LoadOp>(loc, baseAddr);
mlir::Value boxAddr =
builder.create<fir::BoxAddrOp>(loc, baseAddr);
builder.create<fir::ResultOp>(loc, mlir::ValueRange{boxAddr});
@@ -108,6 +119,8 @@ createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
})
.getResults()[0];
} else {
+ if (fir::isBoxAddress(baseAddr.getType()))
+ baseAddr = builder.create<fir::LoadOp>(loc, baseAddr);
baseAddr = builder.create<fir::BoxAddrOp>(loc, baseAddr);
}
retTy = baseAddr.getType();
@@ -342,18 +355,19 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds,
/*treatIndexAsSection=*/true);
+ LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
// If the input value is optional and is not a descriptor, we use the
// rawInput directly.
- mlir::Value baseAddr =
- ((info.addr.getType() != fir::unwrapRefType(info.rawInput.getType())) &&
- info.isPresent)
- ? info.rawInput
- : info.addr;
- Op op = createDataEntryOp<Op>(builder, operandLocation, baseAddr, asFortran,
- bounds, structured, implicit, dataClause,
- baseAddr.getType(), async, asyncDeviceTypes,
- asyncOnlyDeviceTypes, info.isPresent);
+ mlir::Value baseAddr = ((fir::unwrapRefType(info.addr.getType()) !=
+ fir::unwrapRefType(info.rawInput.getType())) &&
+ info.isPresent)
+ ? info.rawInput
+ : info.addr;
+ Op op = createDataEntryOp<Op>(
+ builder, operandLocation, baseAddr, asFortran, bounds, structured,
+ implicit, dataClause, baseAddr.getType(), async, asyncDeviceTypes,
+ asyncOnlyDeviceTypes, /*unwrapBoxAddr=*/true, info.isPresent);
dataOperands.push_back(op.getAccPtr());
}
}
@@ -380,6 +394,7 @@ static void genDeclareDataOperandOperations(
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds);
+ LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
EntryOp op = createDataEntryOp<EntryOp>(
builder, operandLocation, info.addr, asFortran, bounds, structured,
implicit, dataClause, info.addr.getType(),
@@ -842,6 +857,8 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds);
+ LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
+
RecipeOp recipe;
mlir::Type retTy = getTypeFromBounds(bounds, info.addr.getType());
if constexpr (std::is_same_v<RecipeOp, mlir::acc::PrivateRecipeOp>) {
@@ -853,7 +870,7 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
auto op = createDataEntryOp<mlir::acc::PrivateOp>(
builder, operandLocation, info.addr, asFortran, bounds, true,
/*implicit=*/false, mlir::acc::DataClause::acc_private, retTy, async,
- asyncDeviceTypes, asyncOnlyDeviceTypes);
+ asyncDeviceTypes, asyncOnlyDeviceTypes, /*unwrapBoxAddr=*/true);
dataOperands.push_back(op.getAccPtr());
} else {
std::string suffix =
@@ -865,7 +882,8 @@ genPrivatizations(const Fortran::parser::AccObjectList &objectList,
auto op = createDataEntryOp<mlir::acc::FirstprivateOp>(
builder, operandLocation, info.addr, asFortran, bounds, true,
/*implicit=*/false, mlir::acc::DataClause::acc_firstprivate, retTy,
- async, asyncDeviceTypes, asyncOnlyDeviceTypes);
+ async, asyncDeviceTypes, asyncOnlyDeviceTypes,
+ /*unwrapBoxAddr=*/true);
dataOperands.push_back(op.getAccPtr());
}
privatizations.push_back(mlir::SymbolRefAttr::get(
@@ -1421,6 +1439,7 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds);
+ LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
mlir::Type reductionTy = fir::unwrapRefType(info.addr.getType());
if (auto seqTy = mlir::dyn_cast<fir::SequenceType>(reductionTy))
@@ -1433,7 +1452,7 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
builder, operandLocation, info.addr, asFortran, bounds,
/*structured=*/true, /*implicit=*/false,
mlir::acc::DataClause::acc_reduction, info.addr.getType(), async,
- asyncDeviceTypes, asyncOnlyDeviceTypes);
+ asyncDeviceTypes, asyncOnlyDeviceTypes, /*unwrapBoxAddr=*/true);
mlir::Type ty = op.getAccPtr().getType();
if (!areAllBoundConstant(bounds) ||
fir::isAssumedShape(info.addr.getType()) ||
diff --git a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
index b9a28b8..5db9d8a 100644
--- a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
+++ b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
@@ -102,9 +102,11 @@ mlir::Value ConvertFIRToLLVMPattern::getValueFromBox(
auto p = rewriter.create<mlir::LLVM::GEPOp>(
loc, pty, boxTy.llvm, box,
llvm::ArrayRef<mlir::LLVM::GEPArg>{0, boxValue});
- auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
+ auto fldTy = getBoxEleTy(boxTy.llvm, {boxValue});
+ auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, fldTy, p);
+ auto castOp = integerCast(loc, rewriter, resultTy, loadOp);
attachTBAATag(loadOp, boxTy.fir, nullptr, p);
- return loadOp;
+ return castOp;
}
return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, box, boxValue);
}
diff --git a/flang/lib/Optimizer/Transforms/CufOpConversion.cpp b/flang/lib/Optimizer/Transforms/CufOpConversion.cpp
index bdeaaab..70b5037 100644
--- a/flang/lib/Optimizer/Transforms/CufOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CufOpConversion.cpp
@@ -8,10 +8,13 @@
#include "flang/Common/Fortran.h"
#include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
+#include "flang/Optimizer/CodeGen/TypeConverter.h"
#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
#include "flang/Optimizer/Dialect/FIRDialect.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
+#include "flang/Optimizer/Support/DataLayout.h"
+#include "flang/Runtime/CUDA/descriptor.h"
#include "flang/Runtime/allocatable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -25,6 +28,7 @@ namespace fir {
using namespace fir;
using namespace mlir;
using namespace Fortran::runtime;
+using namespace Fortran::runtime::cuda;
namespace {
@@ -137,14 +141,105 @@ struct CufDeallocateOpConversion
}
};
+struct CufAllocOpConversion : public mlir::OpRewritePattern<cuf::AllocOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ CufAllocOpConversion(mlir::MLIRContext *context, mlir::DataLayout *dl,
+ fir::LLVMTypeConverter *typeConverter)
+ : OpRewritePattern(context), dl{dl}, typeConverter{typeConverter} {}
+
+ mlir::LogicalResult
+ matchAndRewrite(cuf::AllocOp op,
+ mlir::PatternRewriter &rewriter) const override {
+ auto boxTy = mlir::dyn_cast_or_null<fir::BaseBoxType>(op.getInType());
+
+ // Only convert cuf.alloc that allocates a descriptor.
+ if (!boxTy)
+ return failure();
+
+ auto mod = op->getParentOfType<mlir::ModuleOp>();
+ fir::FirOpBuilder builder(rewriter, mod);
+ mlir::Location loc = op.getLoc();
+ mlir::func::FuncOp func =
+ fir::runtime::getRuntimeFunc<mkRTKey(CUFAllocDesciptor)>(loc, builder);
+
+ auto fTy = func.getFunctionType();
+ mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
+ mlir::Value sourceLine =
+ fir::factory::locationToLineNo(builder, loc, fTy.getInput(2));
+
+ mlir::Type structTy = typeConverter->convertBoxTypeAsStruct(boxTy);
+ std::size_t boxSize = dl->getTypeSizeInBits(structTy) / 8;
+ mlir::Value sizeInBytes =
+ builder.createIntegerConstant(loc, builder.getIndexType(), boxSize);
+
+ llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
+ builder, loc, fTy, sizeInBytes, sourceFile, sourceLine)};
+ auto callOp = builder.create<fir::CallOp>(loc, func, args);
+ auto convOp = builder.createConvert(loc, op.getResult().getType(),
+ callOp.getResult(0));
+ rewriter.replaceOp(op, convOp);
+ return mlir::success();
+ }
+
+private:
+ mlir::DataLayout *dl;
+ fir::LLVMTypeConverter *typeConverter;
+};
+
+struct CufFreeOpConversion : public mlir::OpRewritePattern<cuf::FreeOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cuf::FreeOp op,
+ mlir::PatternRewriter &rewriter) const override {
+ // Only convert cuf.free on descriptor.
+ if (!mlir::isa<fir::ReferenceType>(op.getDevptr().getType()))
+ return failure();
+ auto refTy = mlir::dyn_cast<fir::ReferenceType>(op.getDevptr().getType());
+ if (!mlir::isa<fir::BaseBoxType>(refTy.getEleTy()))
+ return failure();
+
+ auto mod = op->getParentOfType<mlir::ModuleOp>();
+ fir::FirOpBuilder builder(rewriter, mod);
+ mlir::Location loc = op.getLoc();
+ mlir::func::FuncOp func =
+ fir::runtime::getRuntimeFunc<mkRTKey(CUFFreeDesciptor)>(loc, builder);
+
+ auto fTy = func.getFunctionType();
+ mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
+ mlir::Value sourceLine =
+ fir::factory::locationToLineNo(builder, loc, fTy.getInput(2));
+ llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
+ builder, loc, fTy, op.getDevptr(), sourceFile, sourceLine)};
+ builder.create<fir::CallOp>(loc, func, args);
+ rewriter.eraseOp(op);
+ return mlir::success();
+ }
+};
+
class CufOpConversion : public fir::impl::CufOpConversionBase<CufOpConversion> {
public:
void runOnOperation() override {
auto *ctx = &getContext();
mlir::RewritePatternSet patterns(ctx);
mlir::ConversionTarget target(*ctx);
- target.addIllegalOp<cuf::AllocateOp, cuf::DeallocateOp>();
- patterns.insert<CufAllocateOpConversion, CufDeallocateOpConversion>(ctx);
+
+ mlir::Operation *op = getOperation();
+ mlir::ModuleOp module = mlir::dyn_cast<mlir::ModuleOp>(op);
+ if (!module)
+ return signalPassFailure();
+
+ std::optional<mlir::DataLayout> dl =
+ fir::support::getOrSetDataLayout(module, /*allowDefaultLayout=*/false);
+ fir::LLVMTypeConverter typeConverter(module, /*applyTBAA=*/false,
+ /*forceUnifiedTBAATree=*/false, *dl);
+
+ target.addIllegalOp<cuf::AllocOp, cuf::AllocateOp, cuf::DeallocateOp,
+ cuf::FreeOp>();
+ patterns.insert<CufAllocOpConversion>(ctx, &*dl, &typeConverter);
+ patterns.insert<CufAllocateOpConversion, CufDeallocateOpConversion,
+ CufFreeOpConversion>(ctx);
if (mlir::failed(mlir::applyPartialConversion(getOperation(), target,
std::move(patterns)))) {
mlir::emitError(mlir::UnknownLoc::get(ctx),
diff --git a/flang/runtime/CUDA/CMakeLists.txt b/flang/runtime/CUDA/CMakeLists.txt
index de1104f..8824353 100644
--- a/flang/runtime/CUDA/CMakeLists.txt
+++ b/flang/runtime/CUDA/CMakeLists.txt
@@ -11,6 +11,7 @@ find_library(CUDA_RUNTIME_LIBRARY cuda HINTS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTOR
add_flang_library(CufRuntime
allocator.cpp
+ descriptor.cpp
)
target_link_libraries(CufRuntime
PRIVATE
diff --git a/flang/runtime/CUDA/allocator.cpp b/flang/runtime/CUDA/allocator.cpp
index 26a3c29..cd00d40 100644
--- a/flang/runtime/CUDA/allocator.cpp
+++ b/flang/runtime/CUDA/allocator.cpp
@@ -17,7 +17,7 @@
#include "cuda.h"
-namespace Fortran::runtime::cuf {
+namespace Fortran::runtime::cuda {
void CUFRegisterAllocator() {
allocatorRegistry.Register(
@@ -26,6 +26,8 @@ void CUFRegisterAllocator() {
kDeviceAllocatorPos, {&CUFAllocDevice, CUFFreeDevice});
allocatorRegistry.Register(
kManagedAllocatorPos, {&CUFAllocManaged, CUFFreeManaged});
+ allocatorRegistry.Register(
+ kUnifiedAllocatorPos, {&CUFAllocUnified, CUFFreeUnified});
}
void *CUFAllocPinned(std::size_t sizeInBytes) {
@@ -57,4 +59,14 @@ void CUFFreeManaged(void *p) {
CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(p)));
}
-} // namespace Fortran::runtime::cuf
+void *CUFAllocUnified(std::size_t sizeInBytes) {
+ // Call alloc managed for the time being.
+ return CUFAllocManaged(sizeInBytes);
+}
+
+void CUFFreeUnified(void *p) {
+ // Call free managed for the time being.
+ CUFFreeManaged(p);
+}
+
+} // namespace Fortran::runtime::cuda
diff --git a/flang/runtime/CUDA/descriptor.cpp b/flang/runtime/CUDA/descriptor.cpp
new file mode 100644
index 0000000..1031b1e
--- /dev/null
+++ b/flang/runtime/CUDA/descriptor.cpp
@@ -0,0 +1,28 @@
+//===-- runtime/CUDA/descriptor.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Runtime/CUDA/descriptor.h"
+#include "flang/Runtime/CUDA/allocator.h"
+
+namespace Fortran::runtime::cuda {
+extern "C" {
+RT_EXT_API_GROUP_BEGIN
+
+Descriptor *RTDEF(CUFAllocDesciptor)(
+ std::size_t sizeInBytes, const char *sourceFile, int sourceLine) {
+ return reinterpret_cast<Descriptor *>(CUFAllocManaged(sizeInBytes));
+}
+
+void RTDEF(CUFFreeDesciptor)(
+ Descriptor *desc, const char *sourceFile, int sourceLine) {
+ CUFFreeManaged(reinterpret_cast<void *>(desc));
+}
+
+RT_EXT_API_GROUP_END
+}
+} // namespace Fortran::runtime::cuda
diff --git a/flang/runtime/copy.cpp b/flang/runtime/copy.cpp
index c2dbbc4..41d2aef 100644
--- a/flang/runtime/copy.cpp
+++ b/flang/runtime/copy.cpp
@@ -23,17 +23,17 @@ using StaticDescTy = StaticDescriptor<maxRank, true, 0>;
// for CopyElement.
struct CopyDescriptor {
// A constructor specifying all members explicitly.
+ // The toAt and fromAt specify subscript storages that might be
+ // external to CopyElement, and cannot be modified.
+ // The copy descriptor only establishes toAtPtr_ and fromAtPtr_
+ // pointers to point to these storages.
RT_API_ATTRS CopyDescriptor(const Descriptor &to, const SubscriptValue toAt[],
const Descriptor &from, const SubscriptValue fromAt[],
std::size_t elements, bool usesStaticDescriptors = false)
: to_(to), from_(from), elements_(elements),
usesStaticDescriptors_(usesStaticDescriptors) {
- for (int dim{0}; dim < to.rank(); ++dim) {
- toAt_[dim] = toAt[dim];
- }
- for (int dim{0}; dim < from.rank(); ++dim) {
- fromAt_[dim] = fromAt[dim];
- }
+ toAtPtr_ = toAt;
+ fromAtPtr_ = fromAt;
}
// The number of elements to copy is initialized from the to descriptor.
// The current element subscripts are initialized from the lower bounds
@@ -46,14 +46,32 @@ struct CopyDescriptor {
from.GetLowerBounds(fromAt_);
}
+ // Increment the toAt_ and fromAt_ subscripts to the next
+ // element.
+ RT_API_ATTRS void IncrementSubscripts(Terminator &terminator) {
+ // This method must not be called for copy descriptors
+ // using external non-modifiable subscript storage.
+ RUNTIME_CHECK(terminator, toAt_ == toAtPtr_ && fromAt_ == fromAtPtr_);
+ to_.IncrementSubscripts(toAt_);
+ from_.IncrementSubscripts(fromAt_);
+ }
+
// Descriptor of the destination.
const Descriptor &to_;
// A subscript specifying the current element position to copy to.
SubscriptValue toAt_[maxRank];
+ // A pointer to the storage of the 'to' subscript.
+ // It may point to toAt_ or to an external non-modifiable
+ // subscript storage.
+ const SubscriptValue *toAtPtr_{toAt_};
// Descriptor of the source.
const Descriptor &from_;
// A subscript specifying the current element position to copy from.
SubscriptValue fromAt_[maxRank];
+ // A pointer to the storage of the 'from' subscript.
+ // It may point to fromAt_ or to an external non-modifiable
+ // subscript storage.
+ const SubscriptValue *fromAtPtr_{fromAt_};
// Number of elements left to copy.
std::size_t elements_;
// Must be true, if the to and from descriptors are allocated
@@ -75,6 +93,18 @@ RT_OFFLOAD_API_GROUP_BEGIN
RT_API_ATTRS void CopyElement(const Descriptor &to, const SubscriptValue toAt[],
const Descriptor &from, const SubscriptValue fromAt[],
Terminator &terminator) {
+ if (!to.Addendum()) {
+ // Avoid the overhead of creating the work stacks below
+ // for the simple non-derived type cases, because the overhead
+ // might be noticeable over the total amount of work that
+ // needs to be done for the copy.
+ char *toPtr{to.Element<char>(toAt)};
+ char *fromPtr{from.Element<char>(fromAt)};
+ RUNTIME_CHECK(terminator, to.ElementBytes() == from.ElementBytes());
+ std::memcpy(toPtr, fromPtr, to.ElementBytes());
+ return;
+ }
+
#if !defined(RT_DEVICE_COMPILATION)
constexpr unsigned copyStackReserve{16};
constexpr unsigned descriptorStackReserve{6};
@@ -108,9 +138,9 @@ RT_API_ATTRS void CopyElement(const Descriptor &to, const SubscriptValue toAt[],
continue;
}
const Descriptor &curTo{currentCopy.to_};
- SubscriptValue *curToAt{currentCopy.toAt_};
+ const SubscriptValue *curToAt{currentCopy.toAtPtr_};
const Descriptor &curFrom{currentCopy.from_};
- SubscriptValue *curFromAt{currentCopy.fromAt_};
+ const SubscriptValue *curFromAt{currentCopy.fromAtPtr_};
char *toPtr{curTo.Element<char>(curToAt)};
char *fromPtr{curFrom.Element<char>(curFromAt)};
RUNTIME_CHECK(terminator, curTo.ElementBytes() == curFrom.ElementBytes());
@@ -121,8 +151,7 @@ RT_API_ATTRS void CopyElement(const Descriptor &to, const SubscriptValue toAt[],
std::memcpy(toPtr, fromPtr, curTo.ElementBytes());
--elements;
if (elements != 0) {
- curTo.IncrementSubscripts(curToAt);
- curFrom.IncrementSubscripts(curFromAt);
+ currentCopy.IncrementSubscripts(terminator);
}
// Deep copy allocatable and automatic components if any.
diff --git a/flang/test/Fir/CUDA/cuda-allocate.fir b/flang/test/Fir/CUDA/cuda-allocate.fir
index ab4a253..1274d39 100644
--- a/flang/test/Fir/CUDA/cuda-allocate.fir
+++ b/flang/test/Fir/CUDA/cuda-allocate.fir
@@ -1,5 +1,7 @@
// RUN: fir-opt --cuf-convert %s | FileCheck %s
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>} {
+
func.func @_QPsub1() {
%0 = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", data_attr = #cuf.cuda<device>, uniq_name = "_QFsub1Ea"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
%4:2 = hlfir.declare %0 {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub1Ea"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
@@ -8,14 +10,21 @@ func.func @_QPsub1() {
%c0_i32 = arith.constant 0 : i32
%9 = cuf.allocate %4#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> {data_attr = #cuf.cuda<device>} -> i32
%10 = cuf.deallocate %4#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> {data_attr = #cuf.cuda<device>} -> i32
+ cuf.free %4#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> {data_attr = #cuf.cuda<device>}
return
}
+
// CHECK-LABEL: func.func @_QPsub1()
-// CHECK: %[[DESC:.*]] = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", data_attr = #cuf.cuda<device>, uniq_name = "_QFsub1Ea"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+// CHECK: %[[DESC_RT_CALL:.*]] = fir.call @_FortranACUFAllocDesciptor(%{{.*}}, %{{.*}}, %{{.*}}) : (i64, !fir.ref<i8>, i32) -> !fir.ref<!fir.box<none>>
+// CHECK: %[[DESC:.*]] = fir.convert %[[DESC_RT_CALL]] : (!fir.ref<!fir.box<none>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
// CHECK: %[[DECL_DESC:.*]]:2 = hlfir.declare %[[DESC]] {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub1Ea"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
// CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DECL_DESC]]#1 : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DECL_DESC]]#1 : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %{{.*}} = fir.call @_FortranAAllocatableDeallocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+// CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DECL_DESC]]#1 : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
+// CHECK: fir.call @_FortranACUFFreeDesciptor(%[[BOX_NONE]], %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, !fir.ref<i8>, i32) -> none
+
+}
diff --git a/flang/test/Fir/box-typecode.fir b/flang/test/Fir/box-typecode.fir
index 51b2d02..766c516 100644
--- a/flang/test/Fir/box-typecode.fir
+++ b/flang/test/Fir/box-typecode.fir
@@ -8,5 +8,6 @@ func.func @test_box_typecode(%a: !fir.class<none>) -> i32 {
// CHECK-LABEL: @test_box_typecode(
// CHECK-SAME: ptr %[[BOX:.*]])
// CHECK: %[[GEP:.*]] = getelementptr { ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}} }, ptr %[[BOX]], i32 0, i32 4
-// CHECK: %[[TYPE_CODE:.*]] = load i32, ptr %[[GEP]]
-// CHECK: ret i32 %[[TYPE_CODE]]
+// CHECK: %[[TYPE_CODE:.*]] = load i8, ptr %[[GEP]]
+// CHECK: %[[TYPE_CODE_CONV:.*]] = sext i8 %[[TYPE_CODE]] to i32
+// CHECK: ret i32 %[[TYPE_CODE_CONV]]
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index af9b928..c5a62ca 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -1050,8 +1050,9 @@ func.func @extract_elesize(%arg0: !fir.box<f32>) -> i32 {
// CHECK-LABEL: llvm.func @extract_elesize(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ELE_SIZE:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
-// CHECK: llvm.return %[[ELE_SIZE]] : i32
+// CHECK: %[[ELE_SIZE:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i64
+// CHECK: %[[ELE_SIZE_CONV:.*]] = llvm.trunc %[[ELE_SIZE]] : i64 to i32
+// CHECK: llvm.return %[[ELE_SIZE_CONV]] : i32
// -----
@@ -1085,9 +1086,10 @@ func.func @box_isalloc(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isalloc(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i1
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i8
+// CHECK: %[[ATTR_CONV:.*]] = llvm.sext %[[ATTR]] : i8 to i32
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
+// CHECK: %[[AND:.*]] = llvm.and %[[ATTR_CONV]], %[[ATTR_ISALLOC]] : i32
// CHECK: %[[CMP_C0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[IS_ALLOC:.*]] = llvm.icmp "ne" %[[AND]], %[[CMP_C0]] : i32
// CHECK: llvm.return %[[IS_ALLOC]] : i1
@@ -1106,9 +1108,10 @@ func.func @box_isptr(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isptr(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i1
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i8
+// CHECK: %[[ATTR_CONV:.*]] = llvm.sext %[[ATTR]] : i8 to i32
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
+// CHECK: %[[AND:.*]] = llvm.and %[[ATTR_CONV]], %[[ATTR_ISALLOC]] : i32
// CHECK: %[[CMP_C0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[IS_ALLOC:.*]] = llvm.icmp "ne" %[[AND]], %[[CMP_C0]] : i32
// CHECK: llvm.return %[[IS_ALLOC]] : i1
diff --git a/flang/test/Fir/polymorphic.fir b/flang/test/Fir/polymorphic.fir
index 7493c701..a6b1663 100644
--- a/flang/test/Fir/polymorphic.fir
+++ b/flang/test/Fir/polymorphic.fir
@@ -44,9 +44,10 @@ func.func @_QMpolymorphic_testPtest_rebox() {
// CHECK: %[[ELE_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 1
// CHECK: %[[ELE_SIZE:.*]] = load i64, ptr %[[ELE_SIZE_GEP]]
// CHECK: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 4
-// CHECK: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+// CHECK: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+// CHECK-NEXT: %[[TYPE_CODE_I32:.*]] = sext i8 %[[TYPE_CODE]] to i32
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } undef, i64 %[[ELE_SIZE]], 1
-// CHECK: %[[TYPE_CODE_I8:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+// CHECK: %[[TYPE_CODE_I8:.*]] = trunc i32 %[[TYPE_CODE_I32]] to i8
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } %{{.*}}, i8 %[[TYPE_CODE_I8]], 4
// Test emboxing to a unlimited polymorphic descriptor
@@ -95,9 +96,10 @@ func.func @_QMunlimitedPsub1(%arg0: !fir.class<!fir.array<?xnone>> {fir.bindc_na
// CHECK: %[[ELE_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %[[ARRAY]], i32 0, i32 1
// CHECK: %[[ELE_SIZE:.*]] = load i64, ptr %[[ELE_SIZE_GEP]]
// CHECK: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %[[ARRAY]], i32 0, i32 4
-// CHECK: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+// CHECK: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+// CHECK-NEXT: %[[TYPE_CODE_EXT:.*]] = sext i8 %[[TYPE_CODE]] to i32
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } undef, i64 %[[ELE_SIZE]], 1
-// CHECK: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+// CHECK: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE_EXT]] to i8
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, i8 %[[TYPE_CODE_TRUNC]], 4
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, ptr %[[TYPE_DESC]], 7
// CHECK: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, i64 0, 8, 0
diff --git a/flang/test/Fir/tbaa.fir b/flang/test/Fir/tbaa.fir
index 32e2645..14ee3b7 100644
--- a/flang/test/Fir/tbaa.fir
+++ b/flang/test/Fir/tbaa.fir
@@ -51,7 +51,8 @@ module {
// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[VAL_0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_21:.*]] = llvm.load %[[VAL_20]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_0]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_23:.*]] = llvm.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_23:.*]] = llvm.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i8
+// CHECK-NEXT: %[[VAL_23_EXT:.*]] = llvm.sext %[[VAL_23]] : i8 to i32
// CHECK: %[[VAL_24:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_25:.*]] = llvm.insertvalue %[[VAL_21]], %[[VAL_24]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_26:.*]] = llvm.mlir.constant(20240719 : i32) : i32
@@ -59,7 +60,7 @@ module {
// CHECK: %[[VAL_28:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_29:.*]] = llvm.trunc %[[VAL_28]] : i32 to i8
// CHECK: %[[VAL_30:.*]] = llvm.insertvalue %[[VAL_29]], %[[VAL_27]][3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_31:.*]] = llvm.trunc %[[VAL_23]] : i32 to i8
+// CHECK: %[[VAL_31:.*]] = llvm.trunc %[[VAL_23_EXT]] : i32 to i8
// CHECK: %[[VAL_32:.*]] = llvm.insertvalue %[[VAL_31]], %[[VAL_30]][4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_33:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_34:.*]] = llvm.trunc %[[VAL_33]] : i32 to i8
@@ -149,7 +150,8 @@ module {
// CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_3]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_23:.*]] = llvm.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_24:.*]] = llvm.getelementptr %[[VAL_3]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_24]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_24]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i8
+// CHECK-NEXT: %[[VAL_25_EXT:.*]] = llvm.sext %[[VAL_25]] : i8 to i32
// CHECK: %[[VAL_26:.*]] = llvm.getelementptr %[[VAL_3]][0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_28:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
@@ -159,7 +161,7 @@ module {
// CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[VAL_33:.*]] = llvm.trunc %[[VAL_32]] : i32 to i8
// CHECK: %[[VAL_34:.*]] = llvm.insertvalue %[[VAL_33]], %[[VAL_31]][3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_35:.*]] = llvm.trunc %[[VAL_25]] : i32 to i8
+// CHECK: %[[VAL_35:.*]] = llvm.trunc %[[VAL_25_EXT]] : i32 to i8
// CHECK: %[[VAL_36:.*]] = llvm.insertvalue %[[VAL_35]], %[[VAL_34]][4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_37:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_38:.*]] = llvm.trunc %[[VAL_37]] : i32 to i8
@@ -289,8 +291,9 @@ func.func @tbaa(%arg0: !fir.box<f32>) -> i32 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i32 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
-// CHECK: llvm.return %[[VAL_2]] : i32
+// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_3:.*]] = llvm.trunc %[[VAL_2]] : i64 to i32
+// CHECK: llvm.return %[[VAL_3]] : i32
// CHECK: }
// -----
@@ -308,9 +311,10 @@ func.func @tbaa(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i1 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<15 x array<3 x i64>>)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i8
+// CHECK: %[[VAL_2_I32:.*]] = llvm.sext %[[VAL_2]] : i8 to i32
// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK: %[[VAL_4:.*]] = llvm.and %[[VAL_2]], %[[VAL_3]] : i32
+// CHECK: %[[VAL_4:.*]] = llvm.and %[[VAL_2_I32]], %[[VAL_3]] : i32
// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_6:.*]] = llvm.icmp "ne" %[[VAL_4]], %[[VAL_5]] : i32
// CHECK: llvm.return %[[VAL_6]] : i1
diff --git a/flang/test/Lower/OpenACC/acc-bounds.f90 b/flang/test/Lower/OpenACC/acc-bounds.f90
index a83de91..e44c786 100644
--- a/flang/test/Lower/OpenACC/acc-bounds.f90
+++ b/flang/test/Lower/OpenACC/acc-bounds.f90
@@ -128,14 +128,8 @@ contains
! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> {fir.bindc_name = "a", fir.optional}) {
! CHECK: %[[ARG0_DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs<optional, pointer>, uniq_name = "_QMopenacc_boundsFacc_optional_dataEa"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>)
! CHECK: %[[IS_PRESENT:.*]] = fir.is_present %[[ARG0_DECL]]#1 : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) -> i1
-! CHECK: %[[BOX:.*]] = fir.if %[[IS_PRESENT]] -> (!fir.box<!fir.ptr<!fir.array<?xf32>>>) {
-! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: fir.result %[[LOAD]] : !fir.box<!fir.ptr<!fir.array<?xf32>>>
-! CHECK: } else {
-! CHECK: %[[ABSENT:.*]] = fir.absent !fir.box<!fir.ptr<!fir.array<?xf32>>>
-! CHECK: fir.result %[[ABSENT]] : !fir.box<!fir.ptr<!fir.array<?xf32>>>
-! CHECK: }
! CHECK: %[[RES:.*]]:5 = fir.if %[[IS_PRESENT]] -> (index, index, index, index, index) {
+! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
! CHECK: fir.result %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : index, index, index, index, index
! CHECK: } else {
! CHECK: %[[C0:.*]] = arith.constant 0 : index
@@ -144,7 +138,8 @@ contains
! CHECK: }
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[RES]]#0 : index) upperbound(%[[RES]]#1 : index) extent(%[[RES]]#2 : index) stride(%[[RES]]#3 : index) startIdx(%[[RES]]#4 : index) {strideInBytes = true}
! CHECK: %[[BOX_ADDR:.*]] = fir.if %[[IS_PRESENT]] -> (!fir.ptr<!fir.array<?xf32>>) {
-! CHECK: %[[ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
+! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
+! CHECK: %[[ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
! CHECK: fir.result %[[ADDR]] : !fir.ptr<!fir.array<?xf32>>
! CHECK: } else {
! CHECK: %[[ABSENT:.*]] = fir.absent !fir.ptr<!fir.array<?xf32>>
diff --git a/flang/test/Lower/allocatable-polymorphic.f90 b/flang/test/Lower/allocatable-polymorphic.f90
index 7632b22..e7d2dcd 100644
--- a/flang/test/Lower/allocatable-polymorphic.f90
+++ b/flang/test/Lower/allocatable-polymorphic.f90
@@ -612,9 +612,10 @@ end
! LLVM: %[[ELEM_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 1
! LLVM: %[[ELEM_SIZE:.*]] = load i64, ptr %[[ELEM_SIZE_GEP]]
! LLVM: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 4
-! LLVM: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+! LLVM: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+! LLVM-NEXT: %[[EXT_TYPE_CODE:.*]] = sext i8 %[[TYPE_CODE]] to i32
! LLVM: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } undef, i64 %[[ELEM_SIZE]], 1
-! LLVM: %[[TRUNC_TYPE_CODE:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+! LLVM: %[[TRUNC_TYPE_CODE:.*]] = trunc i32 %[[EXT_TYPE_CODE]] to i8
! LLVM: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, i8 %[[TRUNC_TYPE_CODE]], 4
! LLVM: store { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, ptr %[[TMP:.*]]
! LLVM: call void %{{.*}}(ptr %{{.*}})
@@ -626,9 +627,10 @@ end
! LLVM: %[[ELEM_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 1
! LLVM: %[[ELEM_SIZE:.*]] = load i64, ptr %[[ELEM_SIZE_GEP]]
! LLVM: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 4
-! LLVM: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+! LLVM: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+! LLVM-NEXT: %[[EXT_TYPE_CODE:.*]] = sext i8 %[[TYPE_CODE]] to i32
! LLVM: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } undef, i64 %[[ELEM_SIZE]], 1
-! LLVM: %[[TRUNC_TYPE_CODE:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+! LLVM: %[[TRUNC_TYPE_CODE:.*]] = trunc i32 %[[EXT_TYPE_CODE]] to i8
! LLVM: %{{.*}} = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, i8 %[[TRUNC_TYPE_CODE]], 4
! LLVM: store { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %{{.*}}, ptr %{{.*}}
! LLVM: call void %{{.*}}(ptr %{{.*}})
@@ -641,11 +643,12 @@ end
! LLVM: %[[ELE_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 1
! LLVM: %[[ELE_SIZE:.*]] = load i64, ptr %[[ELE_SIZE_GEP]]
! LLVM: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 4
-! LLVM: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+! LLVM: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+! LLVM-NEXT: %[[TYPE_CODE_EXT:.*]] = sext i8 %[[TYPE_CODE]] to i32
! LLVM: %[[BOX0:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } undef, i64 %[[ELE_SIZE]], 1
! LLVM: %[[BOX1:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX0]], i32 20240719, 2
! LLVM: %[[BOX2:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX1]], i8 0, 3
-! LLVM: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+! LLVM: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE_EXT]] to i8
! LLVM: %[[BOX3:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX2]], i8 %[[TYPE_CODE_TRUNC]], 4
! LLVM: %[[BOX4:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX3]], i8 0, 5
! LLVM: %[[BOX5:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX4]], i8 1, 6
@@ -662,11 +665,12 @@ end
! LLVM: %[[ELE_SIZE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 1
! LLVM: %[[ELE_SIZE:.*]] = load i64, ptr %[[ELE_SIZE_GEP]]
! LLVM: %[[TYPE_CODE_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, ptr %{{.*}}, i32 0, i32 4
-! LLVM: %[[TYPE_CODE:.*]] = load i32, ptr %[[TYPE_CODE_GEP]]
+! LLVM: %[[TYPE_CODE:.*]] = load i8, ptr %[[TYPE_CODE_GEP]]
+! LLVM-NEXT: %[[TYPE_CODE_EXT:.*]] = sext i8 %[[TYPE_CODE]] to i32
! LLVM: %[[BOX0:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } undef, i64 %[[ELE_SIZE]], 1
! LLVM: %[[BOX1:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX0]], i32 20240719, 2
! LLVM: %[[BOX2:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX1]], i8 0, 3
-! LLVM: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE]] to i8
+! LLVM: %[[TYPE_CODE_TRUNC:.*]] = trunc i32 %[[TYPE_CODE_EXT]] to i8
! LLVM: %[[BOX3:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX2]], i8 %[[TYPE_CODE_TRUNC]], 4
! LLVM: %[[BOX4:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX3]], i8 0, 5
! LLVM: %[[BOX5:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } %[[BOX4]], i8 1, 6
diff --git a/flang/tools/flang-driver/CMakeLists.txt b/flang/tools/flang-driver/CMakeLists.txt
index 9f33cdf..baa9496 100644
--- a/flang/tools/flang-driver/CMakeLists.txt
+++ b/flang/tools/flang-driver/CMakeLists.txt
@@ -11,9 +11,18 @@ set( LLVM_LINK_COMPONENTS
TargetParser
)
+option(FLANG_PLUGIN_SUPPORT "Build Flang with plugin support." ON)
+
+# Enable support for plugins, which need access to symbols from flang-new
+if(FLANG_PLUGIN_SUPPORT)
+ set(export_symbols EXPORT_SYMBOLS_FOR_PLUGINS)
+endif()
+
add_flang_tool(flang-new
driver.cpp
fc1_main.cpp
+
+ ${export_symbols}
)
target_link_libraries(flang-new
@@ -28,11 +37,4 @@ clang_target_link_libraries(flang-new
clangBasic
)
-option(FLANG_PLUGIN_SUPPORT "Build Flang with plugin support." ON)
-
-# Enable support for plugins, which need access to symbols from flang-new
-if(FLANG_PLUGIN_SUPPORT)
- export_executable_symbols_for_plugins(flang-new)
-endif()
-
install(TARGETS flang-new DESTINATION "${CMAKE_INSTALL_BINDIR}")
diff --git a/flang/unittests/Runtime/CUDA/AllocatorCUF.cpp b/flang/unittests/Runtime/CUDA/AllocatorCUF.cpp
index f372ae1..4f53e65 100644
--- a/flang/unittests/Runtime/CUDA/AllocatorCUF.cpp
+++ b/flang/unittests/Runtime/CUDA/AllocatorCUF.cpp
@@ -10,12 +10,14 @@
#include "../../../runtime/terminator.h"
#include "flang/Common/Fortran.h"
#include "flang/Runtime/CUDA/allocator.h"
+#include "flang/Runtime/CUDA/descriptor.h"
#include "flang/Runtime/allocatable.h"
#include "flang/Runtime/allocator-registry.h"
#include "cuda.h"
using namespace Fortran::runtime;
+using namespace Fortran::runtime::cuda;
static OwningPtr<Descriptor> createAllocatable(
Fortran::common::TypeCategory tc, int kind, int rank = 1) {
@@ -53,7 +55,7 @@ public:
TEST(AllocatableCUFTest, SimpleDeviceAllocate) {
using Fortran::common::TypeCategory;
- Fortran::runtime::cuf::CUFRegisterAllocator();
+ Fortran::runtime::cuda::CUFRegisterAllocator();
ScopedContext ctx;
// REAL(4), DEVICE, ALLOCATABLE :: a(:)
auto a{createAllocatable(TypeCategory::Real, 4)};
@@ -71,7 +73,7 @@ TEST(AllocatableCUFTest, SimpleDeviceAllocate) {
TEST(AllocatableCUFTest, SimplePinnedAllocate) {
using Fortran::common::TypeCategory;
- Fortran::runtime::cuf::CUFRegisterAllocator();
+ Fortran::runtime::cuda::CUFRegisterAllocator();
ScopedContext ctx;
// INTEGER(4), PINNED, ALLOCATABLE :: a(:)
auto a{createAllocatable(TypeCategory::Integer, 4)};
@@ -87,3 +89,15 @@ TEST(AllocatableCUFTest, SimplePinnedAllocate) {
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
EXPECT_FALSE(a->IsAllocated());
}
+
+TEST(AllocatableCUFTest, DescriptorAllocationTest) {
+ using Fortran::common::TypeCategory;
+ Fortran::runtime::cuda::CUFRegisterAllocator();
+ ScopedContext ctx;
+ // REAL(4), DEVICE, ALLOCATABLE :: a(:)
+ auto a{createAllocatable(TypeCategory::Real, 4)};
+ Descriptor *desc = nullptr;
+ desc = RTNAME(CUFAllocDesciptor)(a->SizeInBytes());
+ EXPECT_TRUE(desc != nullptr);
+ RTNAME(CUFFreeDesciptor)(desc);
+}
diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake
index 539ed04..d809785 100644
--- a/libc/cmake/modules/LLVMLibCTestRules.cmake
+++ b/libc/cmake/modules/LLVMLibCTestRules.cmake
@@ -348,7 +348,7 @@ function(add_libc_fuzzer target_name)
endfunction(add_libc_fuzzer)
# Get libgcc_s to be used in hermetic and integration tests.
-if(NOT LIBC_CC_SUPPORTS_NOSTDLIBPP)
+if(NOT MSVC AND NOT LIBC_CC_SUPPORTS_NOSTDLIBPP)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -print-file-name=libgcc_s.so.1
OUTPUT_VARIABLE LIBGCC_S_LOCATION)
string(STRIP ${LIBGCC_S_LOCATION} LIBGCC_S_LOCATION)
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 8f0e130..d9b0fd8 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -201,7 +201,12 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.strtoull
# time.h entrypoints
+ libc.src.time.asctime
+ libc.src.time.asctime_r
libc.src.time.difftime
+ libc.src.time.gmtime
+ libc.src.time.gmtime_r
+ libc.src.time.mktime
# internal entrypoints
libc.startup.baremetal.init
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 5d2b6fe..60d3070 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -197,7 +197,12 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.strtoull
# time.h entrypoints
+ libc.src.time.asctime
+ libc.src.time.asctime_r
libc.src.time.difftime
+ libc.src.time.gmtime
+ libc.src.time.gmtime_r
+ libc.src.time.mktime
# internal entrypoints
libc.startup.baremetal.init
diff --git a/libc/config/darwin/arm/entrypoints.txt b/libc/config/darwin/arm/entrypoints.txt
index dfbba63..b4af7df 100644
--- a/libc/config/darwin/arm/entrypoints.txt
+++ b/libc/config/darwin/arm/entrypoints.txt
@@ -165,6 +165,10 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fdim
libc.src.math.fdimf
libc.src.math.fdiml
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floor
libc.src.math.floorf
libc.src.math.floorl
@@ -181,6 +185,8 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.frexp
libc.src.math.frexpf
libc.src.math.frexpl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
diff --git a/libc/config/darwin/x86_64/entrypoints.txt b/libc/config/darwin/x86_64/entrypoints.txt
index 5aa8354..89f8802 100644
--- a/libc/config/darwin/x86_64/entrypoints.txt
+++ b/libc/config/darwin/x86_64/entrypoints.txt
@@ -132,6 +132,10 @@ set(TARGET_LIBM_ENTRYPOINTS
#libc.src.math.fdim
#libc.src.math.fdimf
#libc.src.math.fdiml
+ #libc.src.math.fdiv
+ #libc.src.math.fdivl
+ #libc.src.math.ffma
+ #libc.src.math.ffmal
#libc.src.math.floor
#libc.src.math.floorf
#libc.src.math.floorl
@@ -148,6 +152,8 @@ set(TARGET_LIBM_ENTRYPOINTS
#libc.src.math.frexp
#libc.src.math.frexpf
#libc.src.math.frexpl
+ #libc.src.math.fsub
+ #libc.src.math.fsubl
#libc.src.math.hypot
#libc.src.math.hypotf
#libc.src.math.ilogb
diff --git a/libc/config/gpu/entrypoints.txt b/libc/config/gpu/entrypoints.txt
index c70dc08..e1a16a3 100644
--- a/libc/config/gpu/entrypoints.txt
+++ b/libc/config/gpu/entrypoints.txt
@@ -165,6 +165,8 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.strtoll
libc.src.stdlib.strtoul
libc.src.stdlib.strtoull
+ libc.src.stdlib.at_quick_exit
+ libc.src.stdlib.quick_exit
# TODO: Implement these correctly
libc.src.stdlib.aligned_alloc
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index 149c368..b92b96c 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -461,8 +461,11 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fromfpxl
libc.src.math.fsqrt
libc.src.math.fsqrtl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.getpayload
libc.src.math.getpayloadf
+ libc.src.math.getpayloadl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
@@ -600,6 +603,10 @@ if(LIBC_TYPES_HAS_FLOAT16)
# libc.src.math.f16subl
libc.src.math.fabsf16
libc.src.math.fdimf16
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floorf16
libc.src.math.fmaxf16
libc.src.math.fmaximum_mag_numf16
@@ -677,6 +684,8 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.dsubf128
libc.src.math.fabsf128
libc.src.math.fdimf128
+ libc.src.math.fdivf128
+ libc.src.math.ffmaf128
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fmaximum_mag_numf128
@@ -694,6 +703,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.fromfpf128
libc.src.math.fromfpxf128
libc.src.math.fsqrtf128
+ libc.src.math.fsubf128
libc.src.math.getpayloadf128
libc.src.math.ilogbf128
libc.src.math.ldexpf128
@@ -709,6 +719,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.nextafterf128
libc.src.math.nextdownf128
libc.src.math.nextupf128
+ libc.src.math.remainderf128
libc.src.math.remquof128
libc.src.math.rintf128
libc.src.math.roundevenf128
@@ -801,6 +812,11 @@ if(LLVM_LIBC_FULL_BUILD)
libc.src.pthread.pthread_rwlockattr_init
libc.src.pthread.pthread_rwlockattr_setkind_np
libc.src.pthread.pthread_rwlockattr_setpshared
+ libc.src.pthread.pthread_spin_destroy
+ libc.src.pthread.pthread_spin_init
+ libc.src.pthread.pthread_spin_lock
+ libc.src.pthread.pthread_spin_trylock
+ libc.src.pthread.pthread_spin_unlock
libc.src.pthread.pthread_self
libc.src.pthread.pthread_setname_np
libc.src.pthread.pthread_setspecific
diff --git a/libc/config/linux/api.td b/libc/config/linux/api.td
index 320f3e9..6a7c642 100644
--- a/libc/config/linux/api.td
+++ b/libc/config/linux/api.td
@@ -143,6 +143,7 @@ def PThreadAPI : PublicAPI<"pthread.h"> {
"pthread_once_t",
"pthread_rwlockattr_t",
"pthread_rwlock_t",
+ "pthread_spinlock_t",
"pthread_t",
];
}
diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt
index 3dc8aca..c6b7b6c 100644
--- a/libc/config/linux/arm/entrypoints.txt
+++ b/libc/config/linux/arm/entrypoints.txt
@@ -246,6 +246,10 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fdim
libc.src.math.fdimf
libc.src.math.fdiml
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floor
libc.src.math.floorf
libc.src.math.floorl
@@ -293,8 +297,11 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fromfpx
libc.src.math.fromfpxf
libc.src.math.fromfpxl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.getpayload
libc.src.math.getpayloadf
+ libc.src.math.getpayloadl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index 15a6827..a90fdbe 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -409,6 +409,10 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fdim
libc.src.math.fdimf
libc.src.math.fdiml
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floor
libc.src.math.floorf
libc.src.math.floorl
@@ -460,8 +464,11 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fromfpxl
libc.src.math.fsqrt
libc.src.math.fsqrtl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.getpayload
libc.src.math.getpayloadf
+ libc.src.math.getpayloadl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
@@ -576,10 +583,16 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.canonicalizef128
libc.src.math.ceilf128
libc.src.math.copysignf128
+ libc.src.math.daddf128
+ libc.src.math.ddivf128
+ libc.src.math.dfmaf128
libc.src.math.dmulf128
libc.src.math.dsqrtf128
+ libc.src.math.dsubf128
libc.src.math.fabsf128
libc.src.math.fdimf128
+ libc.src.math.fdivf128
+ libc.src.math.ffmaf128
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fmaximum_mag_numf128
@@ -597,6 +610,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.fromfpf128
libc.src.math.fromfpxf128
libc.src.math.fsqrtf128
+ libc.src.math.fsubf128
libc.src.math.getpayloadf128
libc.src.math.ilogbf128
libc.src.math.ldexpf128
@@ -612,11 +626,13 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.nextafterf128
libc.src.math.nextdownf128
libc.src.math.nextupf128
+ libc.src.math.remainderf128
libc.src.math.remquof128
libc.src.math.rintf128
libc.src.math.roundevenf128
libc.src.math.roundf128
libc.src.math.scalbnf128
+ libc.src.math.setpayloadf128
libc.src.math.sqrtf128
libc.src.math.totalorderf128
libc.src.math.totalordermagf128
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 0c54fb2..9ec86f1 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -409,6 +409,10 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fdim
libc.src.math.fdimf
libc.src.math.fdiml
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floor
libc.src.math.floorf
libc.src.math.floorl
@@ -460,8 +464,11 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fromfpxl
libc.src.math.fsqrt
libc.src.math.fsqrtl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.getpayload
libc.src.math.getpayloadf
+ libc.src.math.getpayloadl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
@@ -576,6 +583,8 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.canonicalizef16
libc.src.math.ceilf16
libc.src.math.copysignf16
+ libc.src.math.exp10f16
+ libc.src.math.exp2f16
libc.src.math.expf16
libc.src.math.f16add
libc.src.math.f16addf
@@ -671,6 +680,8 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.dsubf128
libc.src.math.fabsf128
libc.src.math.fdimf128
+ libc.src.math.fdivf128
+ libc.src.math.ffmaf128
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fmaximum_mag_numf128
@@ -688,6 +699,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.fromfpf128
libc.src.math.fromfpxf128
libc.src.math.fsqrtf128
+ libc.src.math.fsubf128
libc.src.math.getpayloadf128
libc.src.math.ilogbf128
libc.src.math.ldexpf128
@@ -703,6 +715,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.nextafterf128
libc.src.math.nextdownf128
libc.src.math.nextupf128
+ libc.src.math.remainderf128
libc.src.math.remquof128
libc.src.math.rintf128
libc.src.math.roundevenf128
diff --git a/libc/config/windows/entrypoints.txt b/libc/config/windows/entrypoints.txt
index d66e7f1..d281835 100644
--- a/libc/config/windows/entrypoints.txt
+++ b/libc/config/windows/entrypoints.txt
@@ -92,6 +92,9 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.realloc
libc.src.stdlib.aligned_alloc
libc.src.stdlib.free
+
+ # errno.h entrypoints
+ libc.src.errno.errno
)
set(TARGET_LIBM_ENTRYPOINTS
@@ -152,6 +155,10 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fdim
libc.src.math.fdimf
libc.src.math.fdiml
+ libc.src.math.fdiv
+ libc.src.math.fdivl
+ libc.src.math.ffma
+ libc.src.math.ffmal
libc.src.math.floor
libc.src.math.floorf
libc.src.math.floorl
@@ -194,6 +201,8 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.frexp
libc.src.math.frexpf
libc.src.math.frexpl
+ libc.src.math.fsub
+ libc.src.math.fsubl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
diff --git a/libc/docs/dev/undefined_behavior.rst b/libc/docs/dev/undefined_behavior.rst
index 9f50545..d0d882b 100644
--- a/libc/docs/dev/undefined_behavior.rst
+++ b/libc/docs/dev/undefined_behavior.rst
@@ -98,3 +98,11 @@ Unrecognized ``clockid_t`` values for ``pthread_rwlock_clock*`` APIs
----------------------------------------------------------------------
POSIX.1-2024 only demands support for ``CLOCK_REALTIME`` and ``CLOCK_MONOTONIC``. Currently,
as in LLVM libc, if other clock ids are used, they will be treated as monotonic clocks.
+
+PThread SpinLock Destroy
+------------------------
+POSIX.1 Issue 7 updates the spinlock destroy behavior description such that the return code for
+uninitialized spinlock and invalid spinlock is left undefined. We follow the recommendation as in
+POSIX.1-2024, where EINVAL is returned if the spinlock is invalid (here we only check for null pointers) or
+EBUSY is returned if the spinlock is currently locked. The lock is poisoned after a successful destroy. That is,
+subsequent operations on the lock object without any reinitialization will return EINVAL.
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index 14ef59e..c665ce3 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -118,7 +118,7 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| ddiv | N/A | N/A | | N/A | |check|\* | 7.12.14.4 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| dfma | N/A | N/A | |check| | N/A | |check|\* | 7.12.14.5 | F.10.11 |
+| dfma | N/A | N/A | |check| | N/A | |check|\* | 7.12.14.5 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| dmul | N/A | N/A | |check| | N/A | |check|\* | 7.12.14.3 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
@@ -140,9 +140,9 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| fdim | |check| | |check| | |check| | |check| | |check| | 7.12.12.1 | F.10.9.1 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| fdiv | N/A | | | N/A | | 7.12.14.4 | F.10.11 |
+| fdiv | N/A | |check| | |check| | N/A | |check|\* | 7.12.14.4 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| ffma | N/A | | | N/A | | 7.12.14.5 | F.10.11 |
+| ffma | N/A | |check| | |check| | N/A | |check|\* | 7.12.14.5 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| floor | |check| | |check| | |check| | |check| | |check| | 7.12.9.2 | F.10.6.2 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
@@ -176,9 +176,9 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| fromfpx | |check| | |check| | |check| | |check| | |check| | 7.12.9.11 | F.10.6.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| fsub | N/A | | | N/A | | 7.12.14.2 | F.10.11 |
+| fsub | N/A | |check| | |check| | N/A | |check|\* | 7.12.14.2 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| getpayload | |check| | |check| | | |check| | |check| | F.10.13.1 | N/A |
+| getpayload | |check| | |check| | |check| | |check| | |check| | F.10.13.1 | N/A |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| ilogb | |check| | |check| | |check| | |check| | |check| | 7.12.6.8 | F.10.3.8 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
@@ -210,7 +210,7 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| nextup | |check| | |check| | |check| | |check| | |check| | 7.12.11.5 | F.10.8.5 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| remainder | |check| | |check| | |check| | |check| | | 7.12.10.2 | F.10.7.2 |
+| remainder | |check| | |check| | |check| | |check| | |check| | 7.12.10.2 | F.10.7.2 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| remquo | |check| | |check| | |check| | |check| | |check| | 7.12.10.3 | F.10.7.3 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
@@ -286,11 +286,11 @@ Higher Math Functions
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| exp | |check| | |check| | | |check| | | 7.12.6.1 | F.10.3.1 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| exp10 | |check| | |check| | | | | 7.12.6.2 | F.10.3.2 |
+| exp10 | |check| | |check| | | |check| | | 7.12.6.2 | F.10.3.2 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| exp10m1 | | | | | | 7.12.6.3 | F.10.3.3 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| exp2 | |check| | |check| | | | | 7.12.6.4 | F.10.3.4 |
+| exp2 | |check| | |check| | | |check| | | 7.12.6.4 | F.10.3.4 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| exp2m1 | |check| | | | | | 7.12.6.5 | F.10.3.5 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/docs/overlay_mode.rst b/libc/docs/overlay_mode.rst
index f9b5666..37368ff 100644
--- a/libc/docs/overlay_mode.rst
+++ b/libc/docs/overlay_mode.rst
@@ -39,7 +39,7 @@ the CMake configure step as follows:
$> cd llvm-project # The llvm-project checkout
$> mkdir build
$> cd build
- $> cmake ../llvm -G Ninja -DLLVM_ENABLE_PROJECTS="libc" \
+ $> cmake ../llvm -G Ninja -DLLVM_ENABLE_RUNTIMES="libc" \
-DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
-DCMAKE_BUILD_TYPE=<Debug|Release> \ # Select build type
-DCMAKE_INSTALL_PREFIX=<Your prefix of choice> # Optional
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 37cae19..cbde24e 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -386,6 +386,7 @@ add_header_macro(
.llvm-libc-types.pthread_once_t
.llvm-libc-types.pthread_rwlock_t
.llvm-libc-types.pthread_rwlockattr_t
+ .llvm-libc-types.pthread_spinlock_t
.llvm-libc-types.pthread_t
)
diff --git a/libc/include/llvm-libc-macros/float-macros.h b/libc/include/llvm-libc-macros/float-macros.h
index 81c1df8..a25ef60 100644
--- a/libc/include/llvm-libc-macros/float-macros.h
+++ b/libc/include/llvm-libc-macros/float-macros.h
@@ -161,6 +161,18 @@
#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
#endif // LDBL_MAX_10_EXP
+#ifndef FLT_HAS_SUBNORM
+#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+#endif // FLT_HAS_SUBNORM
+
+#ifndef DBL_HAS_SUBNORM
+#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+#endif // DBL_HAS_SUBNORM
+
+#ifndef LDBL_HAS_SUBNORM
+#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#endif // LDBL_HAS_SUBNORM
+
// TODO: Add FLT16 and FLT128 constants.
#endif // LLVM_LIBC_MACROS_FLOAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/limits-macros.h b/libc/include/llvm-libc-macros/limits-macros.h
index 456487e..d4aa7ae 100644
--- a/libc/include/llvm-libc-macros/limits-macros.h
+++ b/libc/include/llvm-libc-macros/limits-macros.h
@@ -19,12 +19,10 @@
#endif // __CHAR_BIT__
#endif // CHAR_BIT
-// TODO: https://github.com/llvm/llvm-project/issues/79358
-// Define MB_LEN_MAX if missing.
-// clang: MB_LEN_MAX = 1 -
-// https://github.com/llvm/llvm-project/blob/main/clang/lib/Headers/limits.h#L64
-// glibc: MB_LEN_MAX = 16 -
-// https://github.com/bminor/glibc/blob/master/include/limits.h#L32
+#ifndef MB_LEN_MAX
+// Represents a single UTF-32 wide character in the default locale.
+#define MB_LEN_MAX 4
+#endif // MB_LEN_MAX
// *_WIDTH macros
diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt
index d8b9755..9e77ab2 100644
--- a/libc/include/llvm-libc-types/CMakeLists.txt
+++ b/libc/include/llvm-libc-types/CMakeLists.txt
@@ -56,6 +56,7 @@ add_header(pthread_mutexattr_t HDR pthread_mutexattr_t.h)
add_header(pthread_once_t HDR pthread_once_t.h DEPENDS .__futex_word)
add_header(pthread_rwlock_t HDR pthread_rwlock_t.h DEPENDS .__futex_word .pid_t)
add_header(pthread_rwlockattr_t HDR pthread_rwlockattr_t.h)
+add_header(pthread_spinlock_t HDR pthread_spinlock_t.h DEPENDS .pid_t)
add_header(pthread_t HDR pthread_t.h DEPENDS .__thread_type)
add_header(rlim_t HDR rlim_t.h)
add_header(time_t HDR time_t.h)
diff --git a/libc/include/llvm-libc-types/pthread_spinlock_t.h b/libc/include/llvm-libc-types/pthread_spinlock_t.h
new file mode 100644
index 0000000..03eb02d
--- /dev/null
+++ b/libc/include/llvm-libc-types/pthread_spinlock_t.h
@@ -0,0 +1,17 @@
+//===-- Definition of pthread_spinlock_t type -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_PTHREAD_SPINLOCK_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_SPINLOCK_T_H
+#include "llvm-libc-types/pid_t.h"
+typedef struct {
+ unsigned char __lockword;
+ pid_t __owner;
+} pthread_spinlock_t;
+
+#endif // LLVM_LIBC_TYPES_PTHREAD_SPINLOCK_T_H
diff --git a/libc/newhdrgen/yaml/math.yaml b/libc/newhdrgen/yaml/math.yaml
index 1a5aa0a..d22546e 100644
--- a/libc/newhdrgen/yaml/math.yaml
+++ b/libc/newhdrgen/yaml/math.yaml
@@ -794,6 +794,14 @@ functions:
arguments:
- type: long double
- type: long double
+ - name: remainderf128
+ standards:
+ - stdc
+ return_type: float128
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT128
- name: remquo
standards:
- stdc
@@ -1540,6 +1548,46 @@ functions:
- type: int
- type: unsigned int
guard: LIBC_TYPES_HAS_FLOAT16
+ - name: fsub
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: double
+ - type: double
+ - name: fsubl
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: long double
+ - type: long double
+ - name: fsubf128
+ standards:
+ - llvm_libc_ext
+ return_type: float
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT128
+ - name: getpayload
+ standards:
+ - stdc
+ return_type: double
+ arguments:
+ - type: double *
+ - name: getpayloadl
+ standards:
+ - stdc
+ return_type: long double
+ arguments:
+ - type: long double *
+ - name: getpayloadf
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: float *
- name: getpayloadf16
standards:
- stdc
@@ -1547,6 +1595,13 @@ functions:
arguments:
- type: _Float16 *
guard: LIBC_TYPES_HAS_FLOAT16
+ - name: getpayloadf128
+ standards:
+ - stdc
+ return_type: float128
+ arguments:
+ - type: float128 *
+ guard: LIBC_TYPES_HAS_FLOAT128
- name: ilogbf16
standards:
- stdc
@@ -1845,6 +1900,53 @@ functions:
- type: float128
- type: float128
guard: LIBC_TYPES_HAS_FLOAT128
+ - name: fdiv
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: double
+ - type: double
+ - name: fdivl
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: long double
+ - type: long double
+ - name: fdivf128
+ standards:
+ - llvm_libc_ext
+ return_type: float
+ arguments:
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT128
+ - name: ffma
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: double
+ - type: double
+ - type: double
+ - name: ffmal
+ standards:
+ - stdc
+ return_type: float
+ arguments:
+ - type: long double
+ - type: long double
+ - type: long double
+ - name: ffmaf128
+ standards:
+ - llvm_libc_ext
+ return_type: float
+ arguments:
+ - type: float128
+ - type: float128
+ - type: float128
+ guard: LIBC_TYPES_HAS_FLOAT128
- name: floorf128
standards:
- stdc
diff --git a/libc/newhdrgen/yaml/pthread.yaml b/libc/newhdrgen/yaml/pthread.yaml
index b6934e3..f3c59ee 100644
--- a/libc/newhdrgen/yaml/pthread.yaml
+++ b/libc/newhdrgen/yaml/pthread.yaml
@@ -14,6 +14,7 @@ types:
- type_name: __pthread_start_t
- type_name: __pthread_once_func_t
- type_name: __atfork_callback_t
+ - type_name: pthread_spinlock_t
enums: []
functions:
- name: pthread_atfork
@@ -404,3 +405,29 @@ functions:
return_type: int
arguments:
- type: pthread_rwlock_t *
+ - name: pthread_spin_init
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_spinlock_t *
+ - type: int
+ - name: pthread_spin_destroy
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_spinlock_t *
+ - name: pthread_spin_lock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_spinlock_t *
+ - name: pthread_spin_trylock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_spinlock_t *
+ - name: pthread_spin_unlock
+ standards: POSIX
+ return_type: int
+ arguments:
+ - type: pthread_spinlock_t *
diff --git a/libc/newhdrgen/yaml_to_classes.py b/libc/newhdrgen/yaml_to_classes.py
index 37a4f78..37a4f78 100755..100644
--- a/libc/newhdrgen/yaml_to_classes.py
+++ b/libc/newhdrgen/yaml_to_classes.py
diff --git a/libc/spec/gnu_ext.td b/libc/spec/gnu_ext.td
index 46f1abf..b2a2b8a 100644
--- a/libc/spec/gnu_ext.td
+++ b/libc/spec/gnu_ext.td
@@ -36,17 +36,17 @@ def GnuExtensions : StandardSpec<"GNUExtensions"> {
FunctionSpec<
"lgamma_r",
RetValSpec<DoubleType>,
- [ArgSpec<DoubleType, IntPtr>]
+ [ArgSpec<DoubleType>, ArgSpec<IntPtr>]
>,
FunctionSpec<
"lgammaf_r",
RetValSpec<FloatType>,
- [ArgSpec<FloatType, IntPtr>]
+ [ArgSpec<FloatType>, ArgSpec<IntPtr>]
>,
FunctionSpec<
"lgammal_r",
RetValSpec<LongDoubleType>,
- [ArgSpec<LongDoubleType, IntPtr>]
+ [ArgSpec<LongDoubleType>, ArgSpec<IntPtr>]
>,
]
>;
diff --git a/libc/spec/llvm_libc_ext.td b/libc/spec/llvm_libc_ext.td
index f86a8c1..c4cbca8 100644
--- a/libc/spec/llvm_libc_ext.td
+++ b/libc/spec/llvm_libc_ext.td
@@ -71,6 +71,10 @@ def LLVMLibcExt : StandardSpec<"llvm_libc_ext"> {
GuardedFunctionSpec<"f16sub", RetValSpec<Float16Type>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16subf", RetValSpec<Float16Type>, [ArgSpec<FloatType>, ArgSpec<FloatType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"f16subl", RetValSpec<Float16Type>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>], "LIBC_TYPES_HAS_FLOAT16">,
+
+ GuardedFunctionSpec<"fdivf128", RetValSpec<FloatType>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ GuardedFunctionSpec<"ffmaf128", RetValSpec<FloatType>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
GuardedFunctionSpec<"fmulf128", RetValSpec<FloatType>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
@@ -94,8 +98,10 @@ def LLVMLibcExt : StandardSpec<"llvm_libc_ext"> {
GuardedFunctionSpec<"fsqrtf128", RetValSpec<FloatType>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+ GuardedFunctionSpec<"fsubf128", RetValSpec<FloatType>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
FunctionSpec<"powi", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>]>,
- FunctionSpec<"powif", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>]>,
+ FunctionSpec<"powif", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>]>,
]
>;
diff --git a/libc/spec/posix.td b/libc/spec/posix.td
index 0edf908..085f2ec 100644
--- a/libc/spec/posix.td
+++ b/libc/spec/posix.td
@@ -132,6 +132,9 @@ def POSIX : StandardSpec<"POSIX"> {
PtrType PThreadRWLockTPtr = PtrType<PThreadRWLockTType>;
RestrictedPtrType RestrictedPThreadRWLockTPtr = RestrictedPtrType<PThreadRWLockTType>;
+ NamedType PThreadSpinLockTType = NamedType<"pthread_spinlock_t">;
+ PtrType PThreadSpinLockTPtr = PtrType<PThreadSpinLockTType>;
+
PtrType PThreadTPtr = PtrType<PThreadTType>;
RestrictedPtrType RestrictedPThreadTPtr = RestrictedPtrType<PThreadTType>;
@@ -1049,6 +1052,7 @@ def POSIX : StandardSpec<"POSIX"> {
PThreadOnceT,
PThreadRWLockAttrTType,
PThreadRWLockTType,
+ PThreadSpinLockTType,
PThreadStartT,
PThreadTSSDtorT,
PThreadTType,
@@ -1360,6 +1364,31 @@ def POSIX : StandardSpec<"POSIX"> {
RetValSpec<IntType>,
[ArgSpec<PThreadRWLockTPtr>]
>,
+ FunctionSpec<
+ "pthread_spin_init",
+ RetValSpec<IntType>,
+ [ArgSpec<PThreadSpinLockTPtr>, ArgSpec<IntType>]
+ >,
+ FunctionSpec<
+ "pthread_spin_destroy",
+ RetValSpec<IntType>,
+ [ArgSpec<PThreadSpinLockTPtr>]
+ >,
+ FunctionSpec<
+ "pthread_spin_lock",
+ RetValSpec<IntType>,
+ [ArgSpec<PThreadSpinLockTPtr>]
+ >,
+ FunctionSpec<
+ "pthread_spin_trylock",
+ RetValSpec<IntType>,
+ [ArgSpec<PThreadSpinLockTPtr>]
+ >,
+ FunctionSpec<
+ "pthread_spin_unlock",
+ RetValSpec<IntType>,
+ [ArgSpec<PThreadSpinLockTPtr>]
+ >
]
>;
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 506b1c6..5dde6ac 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -414,6 +414,12 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"fdiml", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
GuardedFunctionSpec<"fdimf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>, ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"fdimf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fdiv", RetValSpec<FloatType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fdivl", RetValSpec<FloatType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+
+ FunctionSpec<"ffma", RetValSpec<FloatType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"ffmal", RetValSpec<FloatType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
FunctionSpec<"floor", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"floorf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
@@ -510,6 +516,9 @@ def StdC : StandardSpec<"stdc"> {
GuardedFunctionSpec<"fromfpxf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"fromfpxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT128">,
+ FunctionSpec<"fsub", RetValSpec<FloatType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fsubl", RetValSpec<FloatType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+
FunctionSpec<"ufromfp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
FunctionSpec<"ufromfpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
FunctionSpec<"ufromfpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
@@ -582,6 +591,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"exp2", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"exp2f", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
+ GuardedFunctionSpec<"exp2f16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
FunctionSpec<"exp2m1f", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
@@ -590,11 +600,13 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"exp10", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"exp10f", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
+ GuardedFunctionSpec<"exp10f16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
FunctionSpec<"remainder", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"remainderf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"remainderl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
GuardedFunctionSpec<"remainderf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>, ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
+ GuardedFunctionSpec<"remainderf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"remquo", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>, ArgSpec<IntPtr>]>,
FunctionSpec<"remquof", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>, ArgSpec<IntPtr>]>,
@@ -739,10 +751,11 @@ def StdC : StandardSpec<"stdc"> {
GuardedFunctionSpec<"totalordermagf128", RetValSpec<IntType>, [ArgSpec<Float128Ptr>, ArgSpec<Float128Ptr>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"getpayload", RetValSpec<DoubleType>, [ArgSpec<DoublePtr>]>,
- FunctionSpec<"getpayloadf", RetValSpec<FloatType>, [ArgSpec<FloatPtr>]>,
+ FunctionSpec<"getpayloadf", RetValSpec<FloatType>, [ArgSpec<FloatPtr>]>,
+ FunctionSpec<"getpayloadl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoublePtr>]>,
GuardedFunctionSpec<"getpayloadf16", RetValSpec<Float16Type>, [ArgSpec<Float16Ptr>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"getpayloadf128", RetValSpec<Float128Type>, [ArgSpec<Float128Ptr>], "LIBC_TYPES_HAS_FLOAT128">,
-
+
FunctionSpec<"setpayload", RetValSpec<IntType>, [ArgSpec<DoublePtr>, ArgSpec<DoubleType>]>,
FunctionSpec<"setpayloadf", RetValSpec<IntType>, [ArgSpec<FloatPtr>, ArgSpec<FloatType>]>,
GuardedFunctionSpec<"setpayloadf16", RetValSpec<IntType>, [ArgSpec<Float16Ptr>, ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
diff --git a/libc/src/__support/threads/spin_lock.h b/libc/src/__support/threads/spin_lock.h
index 8a365505..e176ad9 100644
--- a/libc/src/__support/threads/spin_lock.h
+++ b/libc/src/__support/threads/spin_lock.h
@@ -11,26 +11,17 @@
#include "src/__support/CPP/atomic.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/properties/architectures.h"
#include "src/__support/threads/sleep.h"
namespace LIBC_NAMESPACE_DECL {
-namespace spinlock {
-template <typename LockWord, typename Return>
-using AtomicOp = Return (cpp::Atomic<LockWord>::*)(LockWord, cpp::MemoryOrder,
- cpp::MemoryScope);
-}
-
-template <typename LockWord, spinlock::AtomicOp<LockWord, LockWord> Acquire,
- spinlock::AtomicOp<LockWord, void> Release>
-class SpinLockAdaptor {
- cpp::Atomic<LockWord> flag;
+class SpinLock {
+ cpp::Atomic<unsigned char> flag;
public:
- LIBC_INLINE constexpr SpinLockAdaptor() : flag{false} {}
+ LIBC_INLINE constexpr SpinLock() : flag{0} {}
LIBC_INLINE bool try_lock() {
- return !flag.*Acquire(static_cast<LockWord>(1), cpp::MemoryOrder::ACQUIRE);
+ return !flag.exchange(1u, cpp::MemoryOrder::ACQUIRE);
}
LIBC_INLINE void lock() {
// clang-format off
@@ -60,22 +51,15 @@ public:
while (flag.load(cpp::MemoryOrder::RELAXED))
sleep_briefly();
}
- LIBC_INLINE void unlock() {
- flag.*Release(static_cast<LockWord>(0), cpp::MemoryOrder::RELEASE);
+ LIBC_INLINE void unlock() { flag.store(0u, cpp::MemoryOrder::RELEASE); }
+ LIBC_INLINE bool is_locked() { return flag.load(cpp::MemoryOrder::ACQUIRE); }
+ LIBC_INLINE bool is_invalid() {
+ return flag.load(cpp::MemoryOrder::ACQUIRE) > 1;
}
+ // poison the lock
+ LIBC_INLINE ~SpinLock() { flag.store(0xffu, cpp::MemoryOrder::RELEASE); }
};
-// It is reported that atomic operations with higher-order semantics
-// lead to better performance on GPUs.
-#ifdef LIBC_TARGET_ARCH_IS_GPU
-using SpinLock =
- SpinLockAdaptor<unsigned int, &cpp::Atomic<unsigned int>::fetch_or,
- &cpp::Atomic<unsigned int>::fetch_and>;
-#else
-using SpinLock = SpinLockAdaptor<bool, &cpp::Atomic<bool>::exchange,
- &cpp::Atomic<bool>::store>;
-#endif
-
} // namespace LIBC_NAMESPACE_DECL
#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_SPIN_LOCK_H
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index aec94d4..a0bd526 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -110,11 +110,13 @@ add_math_entrypoint_object(expf16)
add_math_entrypoint_object(exp2)
add_math_entrypoint_object(exp2f)
+add_math_entrypoint_object(exp2f16)
add_math_entrypoint_object(exp2m1f)
add_math_entrypoint_object(exp10)
add_math_entrypoint_object(exp10f)
+add_math_entrypoint_object(exp10f16)
add_math_entrypoint_object(expm1)
add_math_entrypoint_object(expm1f)
@@ -166,6 +168,14 @@ add_math_entrypoint_object(fdiml)
add_math_entrypoint_object(fdimf16)
add_math_entrypoint_object(fdimf128)
+add_math_entrypoint_object(fdiv)
+add_math_entrypoint_object(fdivl)
+add_math_entrypoint_object(fdivf128)
+
+add_math_entrypoint_object(ffma)
+add_math_entrypoint_object(ffmal)
+add_math_entrypoint_object(ffmaf128)
+
add_math_entrypoint_object(floor)
add_math_entrypoint_object(floorf)
add_math_entrypoint_object(floorl)
@@ -263,8 +273,13 @@ add_math_entrypoint_object(fromfpxl)
add_math_entrypoint_object(fromfpxf16)
add_math_entrypoint_object(fromfpxf128)
+add_math_entrypoint_object(fsub)
+add_math_entrypoint_object(fsubl)
+add_math_entrypoint_object(fsubf128)
+
add_math_entrypoint_object(getpayload)
add_math_entrypoint_object(getpayloadf)
+add_math_entrypoint_object(getpayloadl)
add_math_entrypoint_object(getpayloadf16)
add_math_entrypoint_object(getpayloadf128)
@@ -385,6 +400,7 @@ add_math_entrypoint_object(remainder)
add_math_entrypoint_object(remainderf)
add_math_entrypoint_object(remainderl)
add_math_entrypoint_object(remainderf16)
+add_math_entrypoint_object(remainderf128)
add_math_entrypoint_object(remquo)
add_math_entrypoint_object(remquof)
diff --git a/libc/src/math/exp10f16.h b/libc/src/math/exp10f16.h
new file mode 100644
index 0000000..62a62f7
--- /dev/null
+++ b/libc/src/math/exp10f16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for exp10f16 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_EXP10F16_H
+#define LLVM_LIBC_SRC_MATH_EXP10F16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float16 exp10f16(float16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_EXP10F16_H
diff --git a/libc/src/math/exp2f16.h b/libc/src/math/exp2f16.h
new file mode 100644
index 0000000..71361b9
--- /dev/null
+++ b/libc/src/math/exp2f16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for exp2f16 -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_EXP2F16_H
+#define LLVM_LIBC_SRC_MATH_EXP2F16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float16 exp2f16(float16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_EXP2F16_H
diff --git a/libc/src/math/fdiv.h b/libc/src/math/fdiv.h
new file mode 100644
index 0000000..4d60afa
--- /dev/null
+++ b/libc/src/math/fdiv.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fdiv --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FDIV_H
+#define LLVM_LIBC_SRC_MATH_FDIV_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fdiv(double x, double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FDIV_H
diff --git a/libc/src/math/fdivf128.h b/libc/src/math/fdivf128.h
new file mode 100644
index 0000000..1a15038
--- /dev/null
+++ b/libc/src/math/fdivf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fdivf128 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FDIVF128_H
+#define LLVM_LIBC_SRC_MATH_FDIVF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fdivf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FDIVF128_H
diff --git a/libc/src/math/fdivl.h b/libc/src/math/fdivl.h
new file mode 100644
index 0000000..1943eaa
--- /dev/null
+++ b/libc/src/math/fdivl.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fdivl -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FDIVL_H
+#define LLVM_LIBC_SRC_MATH_FDIVL_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fdivl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FDIVL_H
diff --git a/libc/src/math/ffma.h b/libc/src/math/ffma.h
new file mode 100644
index 0000000..98d89f2
--- /dev/null
+++ b/libc/src/math/ffma.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for ffma --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FFMA_H
+#define LLVM_LIBC_SRC_MATH_FFMA_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float ffma(double x, double y, double z);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FFMA_H
diff --git a/libc/src/math/ffmaf128.h b/libc/src/math/ffmaf128.h
new file mode 100644
index 0000000..7410997
--- /dev/null
+++ b/libc/src/math/ffmaf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for ffmaf128 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FFMAF128_H
+#define LLVM_LIBC_SRC_MATH_FFMAF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float ffmaf128(float128 x, float128 y, float128 z);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FFMAF128_H
diff --git a/libc/src/math/ffmal.h b/libc/src/math/ffmal.h
new file mode 100644
index 0000000..75fd40a
--- /dev/null
+++ b/libc/src/math/ffmal.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for ffmal -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FFMAL_H
+#define LLVM_LIBC_SRC_MATH_FFMAL_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float ffmal(long double x, long double y, long double z);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FFMAL_H
diff --git a/libc/src/math/fsub.h b/libc/src/math/fsub.h
new file mode 100644
index 0000000..f17f0fd
--- /dev/null
+++ b/libc/src/math/fsub.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fsub --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FSUB_H
+#define LLVM_LIBC_SRC_MATH_FSUB_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fsub(double x, double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FSUB_H
diff --git a/libc/src/math/fsubf128.h b/libc/src/math/fsubf128.h
new file mode 100644
index 0000000..4f41c7d
--- /dev/null
+++ b/libc/src/math/fsubf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fsubf128 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FSUBF128_H
+#define LLVM_LIBC_SRC_MATH_FSUBF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fsubf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FSUBF128_H
diff --git a/libc/src/math/fsubl.h b/libc/src/math/fsubl.h
new file mode 100644
index 0000000..32570ca
--- /dev/null
+++ b/libc/src/math/fsubl.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fsubl -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FSUBL_H
+#define LLVM_LIBC_SRC_MATH_FSUBL_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float fsubl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FSUBL_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 2fe6cc4..be5cc2e 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1396,6 +1396,29 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ exp2f16
+ SRCS
+ exp2f16.cpp
+ HDRS
+ ../exp2f16.h
+ DEPENDS
+ .expxf16
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.rounding_mode
+ libc.src.__support.macros.optimization
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
exp2m1f
SRCS
exp2m1f.cpp
@@ -1476,6 +1499,30 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ exp10f16
+ SRCS
+ exp10f16.cpp
+ HDRS
+ ../exp10f16.h
+ DEPENDS
+ .expxf16
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.rounding_mode
+ libc.src.__support.macros.optimization
+ libc.src.__support.macros.properties.cpu_features
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
expm1
SRCS
expm1.cpp
@@ -2885,6 +2932,43 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ fsub
+ SRCS
+ fsub.cpp
+ HDRS
+ ../fsub.h
+ DEPENDS
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fsubl
+ SRCS
+ fsubl.cpp
+ HDRS
+ ../fsubl.h
+ DEPENDS
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fsubf128
+ SRCS
+ fsubf128.cpp
+ HDRS
+ ../fsubf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.add_sub
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
sqrt
SRCS
sqrt.cpp
@@ -3045,6 +3129,19 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ remainderf128
+ SRCS
+ remainderf128.cpp
+ HDRS
+ ../remainderf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.division_and_remainder_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
hypotf
SRCS
hypotf.cpp
@@ -3124,6 +3221,80 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ fdiv
+ SRCS
+ fdiv.cpp
+ HDRS
+ ../fdiv.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.generic.div
+)
+
+add_entrypoint_object(
+ fdivl
+ SRCS
+ fdivl.cpp
+ HDRS
+ ../fdivl.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.generic.div
+)
+
+add_entrypoint_object(
+ fdivf128
+ SRCS
+ fdivf128.cpp
+ HDRS
+ ../fdivf128.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.div
+)
+
+add_entrypoint_object(
+ ffma
+ SRCS
+ ffma.cpp
+ HDRS
+ ../ffma.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.fma
+)
+
+add_entrypoint_object(
+ ffmal
+ SRCS
+ ffmal.cpp
+ HDRS
+ ../ffmal.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.fma
+)
+
+add_entrypoint_object(
+ ffmaf128
+ SRCS
+ ffmaf128.cpp
+ HDRS
+ ../ffmaf128.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.fma
+)
+
+add_entrypoint_object(
hypot
SRCS
hypot.cpp
@@ -4241,6 +4412,18 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ getpayloadl
+ SRCS
+ getpayloadl.cpp
+ HDRS
+ ../getpayloadl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
getpayloadf16
SRCS
getpayloadf16.cpp
@@ -4738,3 +4921,11 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
)
+
+add_header_library(
+ expxf16
+ HDRS
+ expxf16.h
+ DEPENDS
+ libc.src.__support.CPP.array
+)
diff --git a/libc/src/math/generic/exp10f16.cpp b/libc/src/math/generic/exp10f16.cpp
new file mode 100644
index 0000000..9959f745
--- /dev/null
+++ b/libc/src/math/generic/exp10f16.cpp
@@ -0,0 +1,170 @@
+//===-- Half-precision 10^x function --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/exp10f16.h"
+#include "expxf16.h"
+#include "hdr/errno_macros.h"
+#include "hdr/fenv_macros.h"
+#include "src/__support/CPP/array.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/nearest_integer.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"
+#include "src/__support/macros/properties/cpu_features.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA
+static constexpr size_t N_EXP10F16_EXCEPTS = 5;
+#else
+static constexpr size_t N_EXP10F16_EXCEPTS = 8;
+#endif
+
+static constexpr fputil::ExceptValues<float16, N_EXP10F16_EXCEPTS>
+ EXP10F16_EXCEPTS = {{
+ // x = 0x1.8f4p-2, exp10f16(x) = 0x1.3ap+1 (RZ)
+ {0x363dU, 0x40e8U, 1U, 0U, 1U},
+ // x = 0x1.95cp-2, exp10f16(x) = 0x1.3ecp+1 (RZ)
+ {0x3657U, 0x40fbU, 1U, 0U, 0U},
+ // x = -0x1.018p-4, exp10f16(x) = 0x1.bbp-1 (RZ)
+ {0xac06U, 0x3aecU, 1U, 0U, 0U},
+ // x = -0x1.c28p+0, exp10f16(x) = 0x1.1ccp-6 (RZ)
+ {0xbf0aU, 0x2473U, 1U, 0U, 0U},
+ // x = -0x1.e1cp+1, exp10f16(x) = 0x1.694p-13 (RZ)
+ {0xc387U, 0x09a5U, 1U, 0U, 0U},
+#ifndef LIBC_TARGET_CPU_HAS_FMA
+ // x = 0x1.0cp+1, exp10f16(x) = 0x1.f04p+6 (RZ)
+ {0x4030U, 0x57c1U, 1U, 0U, 1U},
+ // x = 0x1.1b8p+1, exp10f16(x) = 0x1.47cp+7 (RZ)
+ {0x406eU, 0x591fU, 1U, 0U, 1U},
+ // x = 0x1.1b8p+2, exp10f16(x) = 0x1.a4p+14 (RZ)
+ {0x446eU, 0x7690U, 1U, 0U, 1U},
+#endif
+ }};
+
+// Generated by Sollya with the following commands:
+// > display = hexadecimal;
+// > round(log2(10), SG, RN);
+static constexpr float LOG2F_10 = 0x1.a934fp+1f;
+
+// Generated by Sollya with the following commands:
+// > display = hexadecimal;
+// > round(log10(2), SG, RN);
+static constexpr float LOG10F_2 = 0x1.344136p-2f;
+
+LLVM_LIBC_FUNCTION(float16, exp10f16, (float16 x)) {
+ using FPBits = fputil::FPBits<float16>;
+ FPBits x_bits(x);
+
+ uint16_t x_u = x_bits.uintval();
+ uint16_t x_abs = x_u & 0x7fffU;
+
+ // When |x| >= 5, or x is NaN.
+ if (LIBC_UNLIKELY(x_abs >= 0x4500U)) {
+ // exp10(NaN) = NaN
+ if (x_bits.is_nan()) {
+ if (x_bits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+
+ return x;
+ }
+
+ // When x >= 5.
+ if (x_bits.is_pos()) {
+ // exp10(+inf) = +inf
+ if (x_bits.is_inf())
+ return FPBits::inf().get_val();
+
+ switch (fputil::quick_get_round()) {
+ case FE_TONEAREST:
+ case FE_UPWARD:
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_OVERFLOW);
+ return FPBits::inf().get_val();
+ default:
+ return FPBits::max_normal().get_val();
+ }
+ }
+
+ // When x <= -8.
+ if (x_u >= 0xc800U) {
+ // exp10(-inf) = +0
+ if (x_bits.is_inf())
+ return FPBits::zero().get_val();
+
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_UNDERFLOW | FE_INEXACT);
+
+ if (fputil::fenv_is_round_up())
+ return FPBits::min_subnormal().get_val();
+ return FPBits::zero().get_val();
+ }
+ }
+
+ // When x is 1, 2, 3, or 4. These are hard-to-round cases with exact results.
+ if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) {
+ switch (x_u) {
+ case 0x3c00U: // x = 1.0f16
+ return static_cast<float16>(10.0);
+ case 0x4000U: // x = 2.0f16
+ return static_cast<float16>(100.0);
+ case 0x4200U: // x = 3.0f16
+ return static_cast<float16>(1'000.0);
+ case 0x4400U: // x = 4.0f16
+ return static_cast<float16>(10'000.0);
+ }
+ }
+
+ if (auto r = EXP10F16_EXCEPTS.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+
+ // For -8 < x < 5, to compute 10^x, we perform the following range reduction:
+ // find hi, mid, lo, such that:
+  //   x = (hi + mid) * log10(2) + lo, in which
+  //     hi is an integer,
+  //     mid * 2^3 is an integer,
+  //     -2^(-4) <= lo < 2^(-4).
+  // In particular,
+  //   hi + mid = round(x * 2^3 * log2(10)) * 2^(-3).
+  // Then,
+  //   10^x = 10^((hi + mid) * log10(2) + lo) = 2^(hi + mid) * 10^lo.
+ // We store 2^mid in the lookup table EXP2_MID_BITS, and compute 2^hi * 2^mid
+ // by adding hi to the exponent field of 2^mid. 10^lo is computed using a
+ // degree-4 minimax polynomial generated by Sollya.
+
+ float xf = x;
+ float kf = fputil::nearest_integer(xf * (LOG2F_10 * 0x1.0p+3f));
+ int x_hi_mid = static_cast<int>(kf);
+ int x_hi = x_hi_mid >> 3;
+ int x_mid = x_hi_mid & 0x7;
+  // lo = x - (hi + mid) * log10(2)
+  //    = round(x * 2^3 * log2(10)) * log10(2) * (-2^(-3)) + x
+ float lo = fputil::multiply_add(kf, LOG10F_2 * -0x1.0p-3f, xf);
+
+ uint32_t exp2_hi_mid_bits =
+ EXP2_MID_BITS[x_mid] +
+ static_cast<uint32_t>(x_hi << fputil::FPBits<float>::FRACTION_LEN);
+ float exp2_hi_mid = fputil::FPBits<float>(exp2_hi_mid_bits).get_val();
+ // Degree-4 minimax polynomial generated by Sollya with the following
+ // commands:
+ // > display = hexadecimal;
+ // > P = fpminimax((10^x - 1)/x, 3, [|SG...|], [-2^-4, 2^-4]);
+ // > 1 + x * P;
+ float exp10_lo = fputil::polyeval(lo, 0x1p+0f, 0x1.26bb14p+1f, 0x1.53526p+1f,
+ 0x1.04b434p+1f, 0x1.2bcf9ep+0f);
+ return static_cast<float16>(exp2_hi_mid * exp10_lo);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/exp2f16.cpp b/libc/src/math/generic/exp2f16.cpp
new file mode 100644
index 0000000..66b7956
--- /dev/null
+++ b/libc/src/math/generic/exp2f16.cpp
@@ -0,0 +1,127 @@
+//===-- Half-precision 2^x function ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/exp2f16.h"
+#include "expxf16.h"
+#include "hdr/errno_macros.h"
+#include "hdr/fenv_macros.h"
+#include "src/__support/CPP/array.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/nearest_integer.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+static constexpr fputil::ExceptValues<float16, 3> EXP2F16_EXCEPTS = {{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ // x = 0x1.714p-11, exp2f16(x) = 0x1p+0 (RZ)
+ {0x11c5U, 0x3c00U, 1U, 0U, 1U},
+ // x = -0x1.558p-4, exp2f16(x) = 0x1.e34p-1 (RZ)
+ {0xad56U, 0x3b8dU, 1U, 0U, 0U},
+ // x = -0x1.d5cp-4, exp2f16(x) = 0x1.d8cp-1 (RZ)
+ {0xaf57U, 0x3b63U, 1U, 0U, 0U},
+}};
+
+LLVM_LIBC_FUNCTION(float16, exp2f16, (float16 x)) {
+ using FPBits = fputil::FPBits<float16>;
+ FPBits x_bits(x);
+
+ uint16_t x_u = x_bits.uintval();
+ uint16_t x_abs = x_u & 0x7fffU;
+
+ // When |x| >= 16, or x is NaN.
+ if (LIBC_UNLIKELY(x_abs >= 0x4c00U)) {
+ // exp2(NaN) = NaN
+ if (x_bits.is_nan()) {
+ if (x_bits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+
+ return x;
+ }
+
+ // When x >= 16.
+ if (x_bits.is_pos()) {
+ // exp2(+inf) = +inf
+ if (x_bits.is_inf())
+ return FPBits::inf().get_val();
+
+ switch (fputil::quick_get_round()) {
+ case FE_TONEAREST:
+ case FE_UPWARD:
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_OVERFLOW);
+ return FPBits::inf().get_val();
+ default:
+ return FPBits::max_normal().get_val();
+ }
+ }
+
+ // When x <= -25.
+ if (x_u >= 0xce40U) {
+ // exp2(-inf) = +0
+ if (x_bits.is_inf())
+ return FPBits::zero().get_val();
+
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_UNDERFLOW | FE_INEXACT);
+
+ if (fputil::fenv_is_round_up())
+ return FPBits::min_subnormal().get_val();
+ return FPBits::zero().get_val();
+ }
+ }
+
+ if (auto r = EXP2F16_EXCEPTS.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+
+ // For -25 < x < 16, to compute 2^x, we perform the following range reduction:
+ // find hi, mid, lo, such that:
+ // x = hi + mid + lo, in which
+ // hi is an integer,
+ // mid * 2^3 is an integer,
+ // -2^(-4) <= lo < 2^(-4).
+ // In particular,
+ // hi + mid = round(x * 2^3) * 2^(-3).
+ // Then,
+ // 2^x = 2^(hi + mid + lo) = 2^hi * 2^mid * 2^lo.
+ // We store 2^mid in the lookup table EXP2_MID_BITS, and compute 2^hi * 2^mid
+ // by adding hi to the exponent field of 2^mid. 2^lo is computed using a
+ // degree-3 minimax polynomial generated by Sollya.
+
+ float xf = x;
+ float kf = fputil::nearest_integer(xf * 0x1.0p+3f);
+ int x_hi_mid = static_cast<int>(kf);
+ int x_hi = x_hi_mid >> 3;
+ int x_mid = x_hi_mid & 0x7;
+ // lo = x - (hi + mid) = round(x * 2^3) * (-2^(-3)) + x
+ float lo = fputil::multiply_add(kf, -0x1.0p-3f, xf);
+
+ uint32_t exp2_hi_mid_bits =
+ EXP2_MID_BITS[x_mid] +
+ static_cast<uint32_t>(x_hi << fputil::FPBits<float>::FRACTION_LEN);
+ float exp2_hi_mid = fputil::FPBits<float>(exp2_hi_mid_bits).get_val();
+ // Degree-3 minimax polynomial generated by Sollya with the following
+ // commands:
+ // > display = hexadecimal;
+ // > P = fpminimax((2^x - 1)/x, 2, [|SG...|], [-2^-4, 2^-4]);
+ // > 1 + x * P;
+ float exp2_lo = fputil::polyeval(lo, 0x1p+0f, 0x1.62e43p-1f, 0x1.ec0aa6p-3f,
+ 0x1.c6b4a6p-5f);
+ return static_cast<float16>(exp2_hi_mid * exp2_lo);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/expxf16.h b/libc/src/math/generic/expxf16.h
new file mode 100644
index 0000000..c33aca33
--- /dev/null
+++ b/libc/src/math/generic/expxf16.h
@@ -0,0 +1,28 @@
+//===-- Common utilities for half-precision exponential functions ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_GENERIC_EXPXF16_H
+#define LLVM_LIBC_SRC_MATH_GENERIC_EXPXF16_H
+
+#include "src/__support/CPP/array.h"
+#include "src/__support/macros/config.h"
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+// Generated by Sollya with the following commands:
+// > display = hexadecimal;
+// > for i from 0 to 7 do printsingle(round(2^(i * 2^-3), SG, RN));
+constexpr cpp::array<uint32_t, 8> EXP2_MID_BITS = {
+ 0x3f80'0000U, 0x3f8b'95c2U, 0x3f98'37f0U, 0x3fa5'fed7U,
+ 0x3fb5'04f3U, 0x3fc5'672aU, 0x3fd7'44fdU, 0x3fea'c0c7U,
+};
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_GENERIC_EXPXF16_H
diff --git a/libc/src/math/generic/fdiv.cpp b/libc/src/math/generic/fdiv.cpp
new file mode 100644
index 0000000..1d97fad
--- /dev/null
+++ b/libc/src/math/generic/fdiv.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fdiv function -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fdiv.h"
+#include "src/__support/FPUtil/generic/div.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fdiv, (double x, double y)) {
+ return fputil::generic::div<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fdivf128.cpp b/libc/src/math/generic/fdivf128.cpp
new file mode 100644
index 0000000..192f13f
--- /dev/null
+++ b/libc/src/math/generic/fdivf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fdivf128 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fdivf128.h"
+#include "src/__support/FPUtil/generic/div.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fdivf128, (float128 x, float128 y)) {
+ return fputil::generic::div<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fdivl.cpp b/libc/src/math/generic/fdivl.cpp
new file mode 100644
index 0000000..dcd5deb
--- /dev/null
+++ b/libc/src/math/generic/fdivl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fdivl function ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fdivl.h"
+#include "src/__support/FPUtil/generic/div.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fdivl, (long double x, long double y)) {
+ return fputil::generic::div<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/ffma.cpp b/libc/src/math/generic/ffma.cpp
new file mode 100644
index 0000000..a4c834d
--- /dev/null
+++ b/libc/src/math/generic/ffma.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ffma function -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ffma.h"
+#include "src/__support/FPUtil/FMA.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, ffma, (double x, double y, double z)) {
+ return fputil::fma<float>(x, y, z);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/ffmaf128.cpp b/libc/src/math/generic/ffmaf128.cpp
new file mode 100644
index 0000000..55da930
--- /dev/null
+++ b/libc/src/math/generic/ffmaf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ffmaf128 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ffmaf128.h"
+#include "src/__support/FPUtil/FMA.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, ffmaf128, (float128 x, float128 y, float128 z)) {
+ return fputil::fma<float>(x, y, z);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/ffmal.cpp b/libc/src/math/generic/ffmal.cpp
new file mode 100644
index 0000000..d5cd4f7
--- /dev/null
+++ b/libc/src/math/generic/ffmal.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of ffmal function ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ffmal.h"
+#include "src/__support/FPUtil/FMA.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, ffmal,
+ (long double x, long double y, long double z)) {
+ return fputil::fma<float>(x, y, z);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fsub.cpp b/libc/src/math/generic/fsub.cpp
new file mode 100644
index 0000000..97e2801
--- /dev/null
+++ b/libc/src/math/generic/fsub.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fsub function -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fsub.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fsub, (double x, double y)) {
+ return fputil::generic::sub<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fsubf128.cpp b/libc/src/math/generic/fsubf128.cpp
new file mode 100644
index 0000000..3efb349
--- /dev/null
+++ b/libc/src/math/generic/fsubf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fsubf128 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fsubf128.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fsubf128, (float128 x, float128 y)) {
+ return fputil::generic::sub<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fsubl.cpp b/libc/src/math/generic/fsubl.cpp
new file mode 100644
index 0000000..cad5a2d
--- /dev/null
+++ b/libc/src/math/generic/fsubl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fsubl function ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fsubl.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float, fsubl, (long double x, long double y)) {
+ return fputil::generic::sub<float>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/getpayloadl.cpp b/libc/src/math/generic/getpayloadl.cpp
new file mode 100644
index 0000000..028dc1e
--- /dev/null
+++ b/libc/src/math/generic/getpayloadl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of getpayloadl function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/getpayloadl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(long double, getpayloadl, (const long double *x)) {
+ return fputil::getpayload(*x);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/pow.cpp b/libc/src/math/generic/pow.cpp
index d2c5f75..20f9144 100644
--- a/libc/src/math/generic/pow.cpp
+++ b/libc/src/math/generic/pow.cpp
@@ -358,42 +358,64 @@ LLVM_LIBC_FUNCTION(double, pow, (double x, double y)) {
// Then m_x = (1 + dx) / r, and
// log2(m_x) = log2( (1 + dx) / r )
// = log2(1 + dx) - log2(r).
- // Perform exact range reduction
+
+ // In order for the overall computations x^y = 2^(y * log2(x)) to have the
+ // relative errors < 2^-52 (1ULP), we will need to evaluate the exponent part
+ // y * log2(x) with absolute errors < 2^-52 (or better, 2^-53). Since the
+ // whole exponent range for double precision is bounded by
+ // |y * log2(x)| < 1076 ~ 2^10, we need to evaluate log2(x) with absolute
+ // errors < 2^-53 * 2^-10 = 2^-63.
+
+ // With that requirement, we use the following degree-6 polynomial
+ // approximation:
+ // P(dx) ~ log2(1 + dx) / dx
+ // Generated by Sollya with:
+ // > P = fpminimax(log2(1 + x)/x, 6, [|D...|], [-2^-8, 2^-7]); P;
+ // > dirtyinfnorm(log2(1 + x) - x*P, [-2^-8, 2^-7]);
+ // 0x1.d03cc...p-66
+ constexpr double COEFFS[] = {0x1.71547652b82fep0, -0x1.71547652b82e7p-1,
+ 0x1.ec709dc3b1fd5p-2, -0x1.7154766124215p-2,
+ 0x1.2776bd90259d8p-2, -0x1.ec586c6f3d311p-3,
+ 0x1.9c4775eccf524p-3};
+ // Error: ulp(dx^2) <= (2^-7)^2 * 2^-52 = 2^-66
+  // Extra errors come from various computations and rounding directions, so
+  // the overall errors can be bounded by 2^-65.
+
double dx;
+ DoubleDouble dx_c0;
+
+ // Perform exact range reduction and exact product dx * c0.
#ifdef LIBC_TARGET_CPU_HAS_FMA
dx = fputil::multiply_add(RD[idx_x], m_x.get_val(), -1.0); // Exact
+ dx_c0 = fputil::exact_mult(COEFFS[0], dx);
#else
double c = FPBits(m_x.uintval() & 0x3fff'e000'0000'0000).get_val();
dx = fputil::multiply_add(RD[idx_x], m_x.get_val() - c, CD[idx_x]); // Exact
+ dx_c0 = fputil::exact_mult<true>(COEFFS[0], dx);
#endif // LIBC_TARGET_CPU_HAS_FMA
- // Degree-5 polynomial approximation:
- // dx * P(dx) ~ log2(1 + dx)
- // Generated by Sollya with:
- // > P = fpminimax(log2(1 + x)/x, 5, [|D...|], [-2^-8, 2^-7]);
- // > dirtyinfnorm(log2(1 + x)/x - P, [-2^-8, 2^-7]);
- // 0x1.653...p-52
- constexpr double COEFFS[] = {0x1.71547652b82fep0, -0x1.71547652b7a07p-1,
- 0x1.ec709dc458db1p-2, -0x1.715479c2266c9p-2,
- 0x1.2776ae1ddf8fp-2, -0x1.e7b2178870157p-3};
-
- double dx2 = dx * dx; // Exact
- double c0 = fputil::multiply_add(dx, COEFFS[1], COEFFS[0]);
- double c1 = fputil::multiply_add(dx, COEFFS[3], COEFFS[2]);
- double c2 = fputil::multiply_add(dx, COEFFS[5], COEFFS[4]);
+ double dx2 = dx * dx;
+ double c0 = fputil::multiply_add(dx, COEFFS[2], COEFFS[1]);
+ double c1 = fputil::multiply_add(dx, COEFFS[4], COEFFS[3]);
+ double c2 = fputil::multiply_add(dx, COEFFS[6], COEFFS[5]);
double p = fputil::polyeval(dx2, c0, c1, c2);
// s = e_x - log2(r) + dx * P(dx)
// Absolute error bound:
- // |log2(x) - log2_x.hi - log2_x.lo| < 2^-58.
- // Relative error bound:
- // |(log2_x.hi + log2_x.lo)/log2(x) - 1| < 2^-51.
- double log2_x_hi = e_x + LOG2_R_DD[idx_x].hi; // Exact
- // Error
- double log2_x_lo = fputil::multiply_add(dx, p, LOG2_R_DD[idx_x].lo);
-
- DoubleDouble log2_x = fputil::exact_add(log2_x_hi, log2_x_lo);
+ // |log2(x) - log2_x.hi - log2_x.lo| < 2^-65.
+
+ // Notice that e_x - log2(r).hi is exact, so we perform an exact sum of
+ // e_x - log2(r).hi and the high part of the product dx * c0:
+ // log2_x_hi.hi + log2_x_hi.lo = e_x - log2(r).hi + (dx * c0).hi
+ DoubleDouble log2_x_hi =
+ fputil::exact_add(e_x + LOG2_R_DD[idx_x].hi, dx_c0.hi);
+ // The low part is dx^2 * p + low part of (dx * c0) + low part of -log2(r).
+ double log2_x_lo =
+ fputil::multiply_add(dx2, p, dx_c0.lo + LOG2_R_DD[idx_x].lo);
+ // Perform accurate sums.
+ DoubleDouble log2_x = fputil::exact_add(log2_x_hi.hi, log2_x_lo);
+ log2_x.lo += log2_x_hi.lo;
// To compute 2^(y * log2(x)), we break the exponent into 3 parts:
// y * log(2) = hi + mid + lo, where
diff --git a/libc/src/math/generic/remainderf128.cpp b/libc/src/math/generic/remainderf128.cpp
new file mode 100644
index 0000000..52b6c51
--- /dev/null
+++ b/libc/src/math/generic/remainderf128.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of remainderf128 function --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/remainderf128.h"
+#include "src/__support/FPUtil/DivisionAndRemainderOperations.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float128, remainderf128, (float128 x, float128 y)) {
+ int quotient;
+ return fputil::remquo(x, y, quotient);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/getpayloadl.h b/libc/src/math/getpayloadl.h
new file mode 100644
index 0000000..1ae9f86
--- /dev/null
+++ b/libc/src/math/getpayloadl.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for getpayloadl -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_GETPAYLOADL_H
+#define LLVM_LIBC_SRC_MATH_GETPAYLOADL_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+long double getpayloadl(const long double *x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_GETPAYLOADL_H
diff --git a/libc/src/math/remainderf128.h b/libc/src/math/remainderf128.h
new file mode 100644
index 0000000..57f770a
--- /dev/null
+++ b/libc/src/math/remainderf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for remainderf128 -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_REMAINDERF128_H
+#define LLVM_LIBC_SRC_MATH_REMAINDERF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float128 remainderf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_REMAINDERF128_H
diff --git a/libc/src/pthread/CMakeLists.txt b/libc/src/pthread/CMakeLists.txt
index 70d10e6..e7e92e5 100644
--- a/libc/src/pthread/CMakeLists.txt
+++ b/libc/src/pthread/CMakeLists.txt
@@ -645,6 +645,71 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ pthread_spin_init
+ SRCS
+ pthread_spin_init.cpp
+ HDRS
+ pthread_spin_init.h
+ DEPENDS
+ libc.include.pthread
+ libc.src.__support.threads.spin_lock
+ libc.src.__support.threads.identifier
+ libc.hdr.errno_macros
+)
+
+add_entrypoint_object(
+ pthread_spin_destroy
+ SRCS
+ pthread_spin_destroy.cpp
+ HDRS
+ pthread_spin_destroy.h
+ DEPENDS
+ libc.include.pthread
+ libc.src.__support.threads.spin_lock
+ libc.src.__support.threads.identifier
+ libc.hdr.errno_macros
+)
+
+add_entrypoint_object(
+ pthread_spin_lock
+ SRCS
+ pthread_spin_lock.cpp
+ HDRS
+ pthread_spin_lock.h
+ DEPENDS
+ libc.include.pthread
+ libc.src.__support.threads.spin_lock
+ libc.src.__support.threads.identifier
+ libc.hdr.errno_macros
+)
+
+add_entrypoint_object(
+ pthread_spin_trylock
+ SRCS
+ pthread_spin_trylock.cpp
+ HDRS
+ pthread_spin_trylock.h
+ DEPENDS
+ libc.include.pthread
+ libc.src.__support.threads.spin_lock
+ libc.src.__support.threads.identifier
+ libc.hdr.errno_macros
+)
+
+add_entrypoint_object(
+ pthread_spin_unlock
+ SRCS
+ pthread_spin_unlock.cpp
+ HDRS
+ pthread_spin_unlock.h
+ DEPENDS
+ libc.include.pthread
+ libc.src.__support.threads.spin_lock
+ libc.src.__support.threads.identifier
+ libc.hdr.errno_macros
+)
+
+add_entrypoint_object(
pthread_once
SRCS
pthread_once.cpp
diff --git a/libc/src/pthread/pthread_spin_destroy.cpp b/libc/src/pthread/pthread_spin_destroy.cpp
new file mode 100644
index 0000000..7d93dd9
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_destroy.cpp
@@ -0,0 +1,47 @@
+//===-- Implementation of pthread_spin_destroy function -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_spin_destroy.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/spin_lock.h"
+namespace LIBC_NAMESPACE_DECL {
+
+static_assert(sizeof(pthread_spinlock_t::__lockword) == sizeof(SpinLock) &&
+ alignof(decltype(pthread_spinlock_t::__lockword)) ==
+ alignof(SpinLock),
+ "pthread_spinlock_t::__lockword and SpinLock must be of the same "
+ "size and alignment");
+
+LLVM_LIBC_FUNCTION(int, pthread_spin_destroy,
+ ([[maybe_unused]] pthread_spinlock_t * lock)) {
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_lock() or pthread_spin_trylock() does not refer to an
+ // initialized spin lock object, it is recommended that the function should
+ // fail and report an [EINVAL] error.
+ if (!lock)
+ return EINVAL;
+ auto spin_lock = reinterpret_cast<SpinLock *>(&lock->__lockword);
+ if (!spin_lock || spin_lock->is_invalid())
+ return EINVAL;
+
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_destroy() or pthread_spin_init() refers to a locked spin
+ // lock object, or detects that the value specified by the lock argument to
+ // pthread_spin_init() refers to an already initialized spin lock object, it
+ // is recommended that the function should fail and report an [EBUSY] error.
+ if (spin_lock->is_locked())
+ return EBUSY;
+
+ // poison the lock
+ spin_lock->~SpinLock();
+ lock->__owner = 0;
+ return 0;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/pthread/pthread_spin_destroy.h b/libc/src/pthread/pthread_spin_destroy.h
new file mode 100644
index 0000000..2581e9e
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_destroy.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_spin_destroy function --*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_DESTROY_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_DESTROY_H
+
+#include "src/__support/macros/config.h"
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+int pthread_spin_destroy(pthread_spinlock_t *lock);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_DESTROY_H
diff --git a/libc/src/pthread/pthread_spin_init.cpp b/libc/src/pthread/pthread_spin_init.cpp
new file mode 100644
index 0000000..5497247
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_init.cpp
@@ -0,0 +1,37 @@
+//===-- Implementation of pthread_spin_init function ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_spin_init.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/spin_lock.h"
+#include <pthread.h> // for PTHREAD_PROCESS_SHARED, PTHREAD_PROCESS_PRIVATE
+
+namespace LIBC_NAMESPACE_DECL {
+
+static_assert(sizeof(pthread_spinlock_t::__lockword) == sizeof(SpinLock) &&
+ alignof(decltype(pthread_spinlock_t::__lockword)) ==
+ alignof(SpinLock),
+ "pthread_spinlock_t::__lockword and SpinLock must be of the same "
+ "size and alignment");
+
+LLVM_LIBC_FUNCTION(int, pthread_spin_init,
+ (pthread_spinlock_t * lock, [[maybe_unused]] int pshared)) {
+ if (!lock)
+ return EINVAL;
+ if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE)
+ return EINVAL;
+ // The spin lock here is a simple atomic flag, so we don't need to do any
+ // special handling for pshared.
+ ::new (&lock->__lockword) SpinLock();
+ lock->__owner = 0;
+ return 0;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/pthread/pthread_spin_init.h b/libc/src/pthread/pthread_spin_init.h
new file mode 100644
index 0000000..89f7e3a
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_init.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_spin_init function ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_INIT_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_INIT_H
+
+#include "src/__support/macros/config.h"
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+int pthread_spin_init(pthread_spinlock_t *lock, int pshared);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_INIT_H
diff --git a/libc/src/pthread/pthread_spin_lock.cpp b/libc/src/pthread/pthread_spin_lock.cpp
new file mode 100644
index 0000000..61c8db1
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_lock.cpp
@@ -0,0 +1,47 @@
+//===-- Implementation of pthread_spin_lock function ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_spin_lock.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/identifier.h"
+#include "src/__support/threads/spin_lock.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+static_assert(sizeof(pthread_spinlock_t::__lockword) == sizeof(SpinLock) &&
+ alignof(decltype(pthread_spinlock_t::__lockword)) ==
+ alignof(SpinLock),
+ "pthread_spinlock_t::__lockword and SpinLock must be of the same "
+ "size and alignment");
+
+LLVM_LIBC_FUNCTION(int, pthread_spin_lock, (pthread_spinlock_t * lock)) {
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_lock() or pthread_spin_trylock() does not refer to an
+ // initialized spin lock object, it is recommended that the function should
+ // fail and report an [EINVAL] error.
+ if (!lock)
+ return EINVAL;
+ auto spin_lock = reinterpret_cast<SpinLock *>(&lock->__lockword);
+ if (spin_lock->is_invalid())
+ return EINVAL;
+
+ pid_t self_tid = internal::gettid();
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_lock() refers to a spin lock object for which the calling
+ // thread already holds the lock, it is recommended that the function should
+ // fail and report an [EDEADLK] error.
+ if (lock->__owner == self_tid)
+ return EDEADLK;
+
+ spin_lock->lock();
+ lock->__owner = self_tid;
+ return 0;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/pthread/pthread_spin_lock.h b/libc/src/pthread/pthread_spin_lock.h
new file mode 100644
index 0000000..835aa85
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_lock.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_spin_lock function ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_LOCK_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_LOCK_H
+
+#include "src/__support/macros/config.h"
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+int pthread_spin_lock(pthread_spinlock_t *lock);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_LOCK_H
diff --git a/libc/src/pthread/pthread_spin_trylock.cpp b/libc/src/pthread/pthread_spin_trylock.cpp
new file mode 100644
index 0000000..99b0eac
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_trylock.cpp
@@ -0,0 +1,41 @@
+//===-- Implementation of pthread_spin_trylock function -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_spin_trylock.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/identifier.h"
+#include "src/__support/threads/spin_lock.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+static_assert(sizeof(pthread_spinlock_t::__lockword) == sizeof(SpinLock) &&
+ alignof(decltype(pthread_spinlock_t::__lockword)) ==
+ alignof(SpinLock),
+ "pthread_spinlock_t::__lockword and SpinLock must be of the same "
+ "size and alignment");
+
+LLVM_LIBC_FUNCTION(int, pthread_spin_trylock, (pthread_spinlock_t * lock)) {
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_lock() or pthread_spin_trylock() does not refer to an
+ // initialized spin lock object, it is recommended that the function should
+ // fail and report an [EINVAL] error.
+ if (!lock)
+ return EINVAL;
+ auto spin_lock = reinterpret_cast<SpinLock *>(&lock->__lockword);
+ if (!spin_lock || spin_lock->is_invalid())
+ return EINVAL;
+ // Try to acquire the lock without blocking.
+ if (!spin_lock->try_lock())
+ return EBUSY;
+ // We have acquired the lock. Update the owner field.
+ lock->__owner = internal::gettid();
+ return 0;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/pthread/pthread_spin_trylock.h b/libc/src/pthread/pthread_spin_trylock.h
new file mode 100644
index 0000000..e175ab8
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_trylock.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_spin_trylock function -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_TRYLOCK_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_TRYLOCK_H
+
+#include "src/__support/macros/config.h"
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+int pthread_spin_trylock(pthread_spinlock_t *lock);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_TRYLOCK_H
diff --git a/libc/src/pthread/pthread_spin_unlock.cpp b/libc/src/pthread/pthread_spin_unlock.cpp
new file mode 100644
index 0000000..a02f2b3
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_unlock.cpp
@@ -0,0 +1,44 @@
+//===-- Implementation of pthread_spin_unlock function --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_spin_unlock.h"
+#include "hdr/errno_macros.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/identifier.h"
+#include "src/__support/threads/spin_lock.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+static_assert(sizeof(pthread_spinlock_t::__lockword) == sizeof(SpinLock) &&
+ alignof(decltype(pthread_spinlock_t::__lockword)) ==
+ alignof(SpinLock),
+ "pthread_spinlock_t::__lockword and SpinLock must be of the same "
+ "size and alignment");
+
+LLVM_LIBC_FUNCTION(int, pthread_spin_unlock, (pthread_spinlock_t * lock)) {
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_lock() or pthread_spin_trylock() does not refer to an
+ // initialized spin lock object, it is recommended that the function should
+ // fail and report an [EINVAL] error.
+ if (!lock)
+ return EINVAL;
+ auto spin_lock = reinterpret_cast<SpinLock *>(&lock->__lockword);
+ if (!spin_lock || spin_lock->is_invalid())
+ return EINVAL;
+ // If an implementation detects that the value specified by the lock argument
+ // to pthread_spin_unlock() refers to a spin lock object for which the current
+ // thread does not hold the lock, it is recommended that the function should
+ // fail and report an [EPERM] error.
+ if (lock->__owner != internal::gettid())
+ return EPERM;
+ // Release the lock.
+ lock->__owner = 0;
+ spin_lock->unlock();
+ return 0;
+}
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/pthread/pthread_spin_unlock.h b/libc/src/pthread/pthread_spin_unlock.h
new file mode 100644
index 0000000..4918613
--- /dev/null
+++ b/libc/src/pthread/pthread_spin_unlock.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_spin_unlock function ---*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_UNLOCK_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_UNLOCK_H
+
+#include "src/__support/macros/config.h"
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+int pthread_spin_unlock(pthread_spinlock_t *lock);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_SPIN_UNLOCK_H
diff --git a/libc/src/time/mktime.cpp b/libc/src/time/mktime.cpp
index 9ea316d..72cd2291 100644
--- a/libc/src/time/mktime.cpp
+++ b/libc/src/time/mktime.cpp
@@ -42,12 +42,18 @@ LLVM_LIBC_FUNCTION(time_t, mktime, (struct tm * tm_out)) {
return time_utils::out_of_range();
if (tm_out->tm_mday > 19)
return time_utils::out_of_range();
- if (tm_out->tm_hour > 3)
- return time_utils::out_of_range();
- if (tm_out->tm_min > 14)
- return time_utils::out_of_range();
- if (tm_out->tm_sec > 7)
- return time_utils::out_of_range();
+ else if (tm_out->tm_mday == 19) {
+ if (tm_out->tm_hour > 3)
+ return time_utils::out_of_range();
+ else if (tm_out->tm_hour == 3) {
+ if (tm_out->tm_min > 14)
+ return time_utils::out_of_range();
+ else if (tm_out->tm_min == 14) {
+ if (tm_out->tm_sec > 7)
+ return time_utils::out_of_range();
+ }
+ }
+ }
}
// Years are ints. A 32-bit year will fit into a 64-bit time_t.
diff --git a/libc/src/time/time_utils.h b/libc/src/time/time_utils.h
index 106870a..47f55f7 100644
--- a/libc/src/time/time_utils.h
+++ b/libc/src/time/time_utils.h
@@ -92,7 +92,12 @@ extern int64_t update_from_seconds(int64_t total_seconds, struct tm *tm);
// POSIX.1-2017 requires this.
LIBC_INLINE time_t out_of_range() {
+#ifdef EOVERFLOW
+ // For non-POSIX uses of the standard C time functions, where EOVERFLOW is
+ // not defined, it's OK not to set errno at all. The plain C standard doesn't
+ // require it.
libc_errno = EOVERFLOW;
+#endif
return TimeConstants::OUT_OF_RANGE_RETURN_VALUE;
}
diff --git a/libc/test/integration/src/pthread/CMakeLists.txt b/libc/test/integration/src/pthread/CMakeLists.txt
index eb26822..48d4368 100644
--- a/libc/test/integration/src/pthread/CMakeLists.txt
+++ b/libc/test/integration/src/pthread/CMakeLists.txt
@@ -24,9 +24,9 @@ add_integration_test(
SRCS
pthread_rwlock_test.cpp
DEPENDS
+ libc.hdr.time_macros
+ libc.hdr.errno_macros
libc.include.pthread
- libc.include.time
- libc.include.errno
libc.src.pthread.pthread_rwlock_destroy
libc.src.pthread.pthread_rwlock_init
libc.src.pthread.pthread_rwlock_rdlock
@@ -60,6 +60,24 @@ add_integration_test(
)
add_integration_test(
+ pthread_spinlock_test
+ SUITE
+ libc-pthread-integration-tests
+ SRCS
+ pthread_spinlock_test.cpp
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.include.pthread
+ libc.src.pthread.pthread_spin_init
+ libc.src.pthread.pthread_spin_destroy
+ libc.src.pthread.pthread_spin_lock
+ libc.src.pthread.pthread_spin_trylock
+ libc.src.pthread.pthread_spin_unlock
+ libc.src.pthread.pthread_create
+ libc.src.pthread.pthread_join
+)
+
+add_integration_test(
pthread_test
SUITE
libc-pthread-integration-tests
diff --git a/libc/test/integration/src/pthread/pthread_rwlock_test.cpp b/libc/test/integration/src/pthread/pthread_rwlock_test.cpp
index 9f5fba1..4cd4255 100644
--- a/libc/test/integration/src/pthread/pthread_rwlock_test.cpp
+++ b/libc/test/integration/src/pthread/pthread_rwlock_test.cpp
@@ -6,6 +6,8 @@
//
//===----------------------------------------------------------------------===//
+#include "hdr/errno_macros.h"
+#include "hdr/time_macros.h"
#include "src/__support/CPP/atomic.h"
#include "src/__support/CPP/new.h"
#include "src/__support/OSUtil/syscall.h"
@@ -40,9 +42,7 @@
#include "src/time/clock_gettime.h"
#include "src/unistd/fork.h"
#include "test/IntegrationTest/test.h"
-#include <errno.h>
#include <pthread.h>
-#include <time.h>
namespace LIBC_NAMESPACE_DECL {
namespace rwlock {
diff --git a/libc/test/integration/src/pthread/pthread_spinlock_test.cpp b/libc/test/integration/src/pthread/pthread_spinlock_test.cpp
new file mode 100644
index 0000000..233daf8
--- /dev/null
+++ b/libc/test/integration/src/pthread/pthread_spinlock_test.cpp
@@ -0,0 +1,145 @@
+//===-- Tests for pthread_spinlock ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/errno_macros.h"
+#include "src/pthread/pthread_create.h"
+#include "src/pthread/pthread_join.h"
+#include "src/pthread/pthread_spin_destroy.h"
+#include "src/pthread/pthread_spin_init.h"
+#include "src/pthread/pthread_spin_lock.h"
+#include "src/pthread/pthread_spin_trylock.h"
+#include "src/pthread/pthread_spin_unlock.h"
+#include "test/IntegrationTest/test.h"
+#include <pthread.h>
+
+namespace {
+void smoke_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+}
+
+void trylock_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_trylock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_trylock(&lock), EBUSY);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_trylock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+}
+
+void destroy_held_lock_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), EBUSY);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+}
+
+void use_after_destroy_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&lock), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_trylock(&lock), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), EINVAL);
+}
+
+void unlock_without_holding_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), EPERM);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+}
+
+void deadlock_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&lock), EDEADLK);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&lock), 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+}
+
+void null_lock_test() {
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(nullptr, 0), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(nullptr), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_trylock(nullptr), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(nullptr), EINVAL);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(nullptr), EINVAL);
+}
+
+void pshared_attribute_test() {
+ pthread_spinlock_t lock;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_SHARED),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE),
+ 0);
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&lock), 0);
+
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&lock, -1), EINVAL);
+}
+
+void multi_thread_test() {
+ struct shared_data {
+ pthread_spinlock_t lock;
+ int count = 0;
+ } shared;
+ pthread_t thread[10];
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_init(&shared.lock, 0), 0);
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQ(
+ LIBC_NAMESPACE::pthread_create(
+ &thread[i], nullptr,
+ [](void *arg) -> void * {
+ auto *data = static_cast<shared_data *>(arg);
+ for (int j = 0; j < 1000; ++j) {
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_lock(&data->lock), 0);
+ data->count += j;
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_unlock(&data->lock), 0);
+ }
+ return nullptr;
+ },
+ &shared),
+ 0);
+ }
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_join(thread[i], nullptr), 0);
+ }
+ ASSERT_EQ(LIBC_NAMESPACE::pthread_spin_destroy(&shared.lock), 0);
+ ASSERT_EQ(shared.count, 1000 * 999 * 5);
+}
+
+} // namespace
+
+TEST_MAIN() {
+ smoke_test();
+ trylock_test();
+ destroy_held_lock_test();
+ use_after_destroy_test();
+ unlock_without_holding_test();
+ deadlock_test();
+ multi_thread_test();
+ null_lock_test();
+ pshared_attribute_test();
+ return 0;
+}
diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt
index ecc8ff1..f3703eb 100644
--- a/libc/test/src/math/CMakeLists.txt
+++ b/libc/test/src/math/CMakeLists.txt
@@ -940,6 +940,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ exp2_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ exp2_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.math.exp2
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
exp2f_test
NEED_MPFR
SUITE
@@ -953,16 +966,14 @@ add_fp_unittest(
)
add_fp_unittest(
- exp2_test
- NEED_MPFR
- SUITE
- libc-math-unittests
- SRCS
- exp2_test.cpp
- DEPENDS
- libc.src.errno.errno
- libc.src.math.exp2
- libc.src.__support.FPUtil.fp_bits
+ exp2f16_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ exp2f16_test.cpp
+ DEPENDS
+ libc.src.math.exp2f16
)
add_fp_unittest(
@@ -980,6 +991,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ exp10_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ exp10_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.math.exp10
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
exp10f_test
NEED_MPFR
SUITE
@@ -993,16 +1017,14 @@ add_fp_unittest(
)
add_fp_unittest(
- exp10_test
- NEED_MPFR
- SUITE
- libc-math-unittests
- SRCS
- exp10_test.cpp
- DEPENDS
- libc.src.errno.errno
- libc.src.math.exp10
- libc.src.__support.FPUtil.fp_bits
+ exp10f16_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ exp10f16_test.cpp
+ DEPENDS
+ libc.src.math.exp10f16
)
add_fp_unittest(
@@ -2320,6 +2342,32 @@ add_fp_unittest(
)
add_fp_unittest(
+ fsub_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ fsub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.fsub
+)
+
+add_fp_unittest(
+ fsubl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ fsubl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.fsubl
+)
+
+add_fp_unittest(
dsqrtl_test
NEED_MPFR
SUITE
@@ -2444,6 +2492,63 @@ add_fp_unittest(
libc.src.math.dsubl
)
+add_fp_unittest(
+ fdiv_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ fdiv_test.cpp
+ HDRS
+ DivTest.h
+ DEPENDS
+ libc.src.math.fdiv
+)
+
+add_fp_unittest(
+ fdivl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ fdivl_test.cpp
+ HDRS
+ DivTest.h
+ DEPENDS
+ libc.src.math.fdivl
+)
+
+
+add_fp_unittest(
+ ffma_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ ffma_test.cpp
+ HDRS
+ FmaTest.h
+ DEPENDS
+ libc.src.math.ffma
+ libc.src.stdlib.rand
+ libc.src.stdlib.srand
+)
+
+add_fp_unittest(
+ ffmal_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ ffmal_test.cpp
+ HDRS
+ FmaTest.h
+ DEPENDS
+ libc.src.math.ffmal
+ libc.src.stdlib.rand
+ libc.src.stdlib.srand
+)
+
add_subdirectory(generic)
add_subdirectory(smoke)
diff --git a/libc/test/src/math/exp10f16_test.cpp b/libc/test/src/math/exp10f16_test.cpp
new file mode 100644
index 0000000..fc49331
--- /dev/null
+++ b/libc/test/src/math/exp10f16_test.cpp
@@ -0,0 +1,40 @@
+//===-- Exhaustive test for exp10f16 --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/exp10f16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+#include "utils/MPFRWrapper/MPFRUtils.h"
+
+using LlvmLibcExp10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+namespace mpfr = LIBC_NAMESPACE::testing::mpfr;
+
+// Range: [0, Inf];
+static constexpr uint16_t POS_START = 0x0000U;
+static constexpr uint16_t POS_STOP = 0x7c00U;
+
+// Range: [-Inf, 0];
+static constexpr uint16_t NEG_START = 0x8000U;
+static constexpr uint16_t NEG_STOP = 0xfc00U;
+
+TEST_F(LlvmLibcExp10f16Test, PositiveRange) {
+ for (uint16_t v = POS_START; v <= POS_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10, x,
+ LIBC_NAMESPACE::exp10f16(x), 0.5);
+ }
+}
+
+TEST_F(LlvmLibcExp10f16Test, NegativeRange) {
+ for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10, x,
+ LIBC_NAMESPACE::exp10f16(x), 0.5);
+ }
+}
diff --git a/libc/test/src/math/exp2f16_test.cpp b/libc/test/src/math/exp2f16_test.cpp
new file mode 100644
index 0000000..503d8c2
--- /dev/null
+++ b/libc/test/src/math/exp2f16_test.cpp
@@ -0,0 +1,40 @@
+//===-- Exhaustive test for exp2f16 ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/exp2f16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+#include "utils/MPFRWrapper/MPFRUtils.h"
+
+using LlvmLibcExp2f16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+namespace mpfr = LIBC_NAMESPACE::testing::mpfr;
+
+// Range: [0, Inf];
+static constexpr uint16_t POS_START = 0x0000U;
+static constexpr uint16_t POS_STOP = 0x7c00U;
+
+// Range: [-Inf, 0];
+static constexpr uint16_t NEG_START = 0x8000U;
+static constexpr uint16_t NEG_STOP = 0xfc00U;
+
+TEST_F(LlvmLibcExp2f16Test, PositiveRange) {
+ for (uint16_t v = POS_START; v <= POS_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2, x,
+ LIBC_NAMESPACE::exp2f16(x), 0.5);
+ }
+}
+
+TEST_F(LlvmLibcExp2f16Test, NegativeRange) {
+ for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2, x,
+ LIBC_NAMESPACE::exp2f16(x), 0.5);
+ }
+}
diff --git a/libc/test/src/math/fdiv_test.cpp b/libc/test/src/math/fdiv_test.cpp
new file mode 100644
index 0000000..f0bd602
--- /dev/null
+++ b/libc/test/src/math/fdiv_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fdiv ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DivTest.h"
+
+#include "src/math/fdiv.h"
+
+LIST_DIV_TESTS(float, double, LIBC_NAMESPACE::fdiv)
diff --git a/libc/test/src/math/fdivl_test.cpp b/libc/test/src/math/fdivl_test.cpp
new file mode 100644
index 0000000..bc2b0e4
--- /dev/null
+++ b/libc/test/src/math/fdivl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fdivl -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DivTest.h"
+
+#include "src/math/fdivl.h"
+
+LIST_DIV_TESTS(float, long double, LIBC_NAMESPACE::fdivl)
diff --git a/libc/test/src/math/ffma_test.cpp b/libc/test/src/math/ffma_test.cpp
new file mode 100644
index 0000000..4b6a25c
--- /dev/null
+++ b/libc/test/src/math/ffma_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ffma ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FmaTest.h"
+
+#include "src/math/ffma.h"
+
+LIST_NARROWING_FMA_TESTS(float, double, LIBC_NAMESPACE::ffma)
diff --git a/libc/test/src/math/ffmal_test.cpp b/libc/test/src/math/ffmal_test.cpp
new file mode 100644
index 0000000..200d9e1
--- /dev/null
+++ b/libc/test/src/math/ffmal_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ffmal -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FmaTest.h"
+
+#include "src/math/ffmal.h"
+
+LIST_NARROWING_FMA_TESTS(float, long double, LIBC_NAMESPACE::ffmal)
diff --git a/libc/test/src/math/fsub_test.cpp b/libc/test/src/math/fsub_test.cpp
new file mode 100644
index 0000000..3c825f7
--- /dev/null
+++ b/libc/test/src/math/fsub_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fsub ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/fsub.h"
+
+LIST_SUB_TESTS(float, double, LIBC_NAMESPACE::fsub)
diff --git a/libc/test/src/math/fsubl_test.cpp b/libc/test/src/math/fsubl_test.cpp
new file mode 100644
index 0000000..87234326
--- /dev/null
+++ b/libc/test/src/math/fsubl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fsubl -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/fsubl.h"
+
+LIST_SUB_TESTS(float, long double, LIBC_NAMESPACE::fsubl)
diff --git a/libc/test/src/math/performance_testing/CMakeLists.txt b/libc/test/src/math/performance_testing/CMakeLists.txt
index b43d21a..8e529ca 100644
--- a/libc/test/src/math/performance_testing/CMakeLists.txt
+++ b/libc/test/src/math/performance_testing/CMakeLists.txt
@@ -154,6 +154,17 @@ add_perf_binary(
)
add_perf_binary(
+ exp10f16_perf
+ SRCS
+ exp10f16_perf.cpp
+ DEPENDS
+ .single_input_single_output_diff
+ libc.src.math.exp10f16
+ COMPILE_OPTIONS
+ -fno-builtin
+)
+
+add_perf_binary(
exp2f_perf
SRCS
exp2f_perf.cpp
@@ -165,6 +176,17 @@ add_perf_binary(
)
add_perf_binary(
+ exp2f16_perf
+ SRCS
+ exp2f16_perf.cpp
+ DEPENDS
+ .single_input_single_output_diff
+ libc.src.math.exp2f16
+ COMPILE_OPTIONS
+ -fno-builtin
+)
+
+add_perf_binary(
expf_perf
SRCS
expf_perf.cpp
diff --git a/libc/test/src/math/performance_testing/exp10f16_perf.cpp b/libc/test/src/math/performance_testing/exp10f16_perf.cpp
new file mode 100644
index 0000000..b9e76d4
--- /dev/null
+++ b/libc/test/src/math/performance_testing/exp10f16_perf.cpp
@@ -0,0 +1,22 @@
+//===-- Performance test for exp10f16 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SingleInputSingleOutputPerf.h"
+
+#include "src/math/exp10f16.h"
+
+// LLVM libc might be the only libc implementation with support for float16 math
+// functions currently. We can't compare our float16 functions against the
+// system libc, so we compare them against this placeholder function.
+static float16 placeholderf16(float16 x) { return x; }
+
+int main() {
+ SINGLE_INPUT_SINGLE_OUTPUT_PERF_EX(float16, LIBC_NAMESPACE::exp10f16,
+ ::placeholderf16, 20'000,
+ "exp10f16_perf.log")
+}
diff --git a/libc/test/src/math/performance_testing/exp2f16_perf.cpp b/libc/test/src/math/performance_testing/exp2f16_perf.cpp
new file mode 100644
index 0000000..aa58de2
--- /dev/null
+++ b/libc/test/src/math/performance_testing/exp2f16_perf.cpp
@@ -0,0 +1,22 @@
+//===-- Performance test for exp2f16 --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SingleInputSingleOutputPerf.h"
+
+#include "src/math/exp2f16.h"
+
+// LLVM libc might be the only libc implementation with support for float16 math
+// functions currently. We can't compare our float16 functions against the
+// system libc, so we compare them against this placeholder function.
+static float16 placeholderf16(float16 x) { return x; }
+
+int main() {
+ SINGLE_INPUT_SINGLE_OUTPUT_PERF_EX(float16, LIBC_NAMESPACE::exp2f16,
+ ::placeholderf16, 20'000,
+ "exp2f16_perf.log")
+}
diff --git a/libc/test/src/math/pow_test.cpp b/libc/test/src/math/pow_test.cpp
index 468a6bb..20e3ddf 100644
--- a/libc/test/src/math/pow_test.cpp
+++ b/libc/test/src/math/pow_test.cpp
@@ -18,15 +18,21 @@ namespace mpfr = LIBC_NAMESPACE::testing::mpfr;
TEST_F(LlvmLibcPowTest, TrickyInputs) {
constexpr mpfr::BinaryInput<double> INPUTS[] = {
- {0x1.0853408534085p-2, 0x1.0D148E03BCBA8p-1},
- {0x1.65FBD65FBD657p-1, 0x1.F10D148E03BB6p+1},
+ {0x1.0853408534085p-2, 0x1.0d148e03bcba8p-1},
+ {0x1.65fbd65fbd657p-1, 0x1.f10d148e03bb6p+1},
+ {0x1.c046a084d2e12p-1, 0x1.1f9p+12},
+ {0x1.ae37ed1670326p-1, 0x1.f967df66a202p-1},
+ {0x1.ffffffffffffcp-1, 0x1.fffffffffffffp-2},
+ {0x1.f558a88a8aadep-1, 0x1.88ap+12},
+ {0x1.e84d32731e593p-1, 0x1.2cb8p+13},
+ {0x1.ffffffffffffcp-1, 0x1.fffffffffffffp-2},
};
for (auto input : INPUTS) {
double x = input.x;
double y = input.y;
- EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Pow, input,
- LIBC_NAMESPACE::pow(x, y), 0.5);
+ EXPECT_MPFR_MATCH(mpfr::Operation::Pow, input, LIBC_NAMESPACE::pow(x, y),
+ 1.5);
}
}
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index ebd9ca0..21818e4 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -1030,6 +1030,18 @@ add_fp_unittest(
)
add_fp_unittest(
+ exp2_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ exp2_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.math.exp2
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
exp2f_test
SUITE
libc-math-smoke-tests
@@ -1042,15 +1054,15 @@ add_fp_unittest(
)
add_fp_unittest(
- exp2_test
- SUITE
- libc-math-smoke-tests
- SRCS
- exp2_test.cpp
- DEPENDS
- libc.src.errno.errno
- libc.src.math.exp2
- libc.src.__support.FPUtil.fp_bits
+ exp2f16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ exp2f16_test.cpp
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.errno.errno
+ libc.src.math.exp2f16
)
add_fp_unittest(
@@ -1065,6 +1077,18 @@ add_fp_unittest(
)
add_fp_unittest(
+ exp10_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ exp10_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.math.exp10
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
exp10f_test
SUITE
libc-math-smoke-tests
@@ -1077,15 +1101,15 @@ add_fp_unittest(
)
add_fp_unittest(
- exp10_test
- SUITE
- libc-math-smoke-tests
- SRCS
- exp10_test.cpp
- DEPENDS
- libc.src.errno.errno
- libc.src.math.exp10
- libc.src.__support.FPUtil.fp_bits
+ exp10f16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ exp10f16_test.cpp
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.errno.errno
+ libc.src.math.exp10f16
)
add_fp_unittest(
@@ -3850,6 +3874,18 @@ add_fp_unittest(
)
add_fp_unittest(
+ getpayloadl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ getpayloadl_test.cpp
+ HDRS
+ GetPayloadTest.h
+ DEPENDS
+ libc.src.math.getpayloadl
+)
+
+add_fp_unittest(
getpayloadf16_test
SUITE
libc-math-smoke-tests
@@ -4058,8 +4094,8 @@ add_fp_unittest(
HDRS
DivTest.h
DEPENDS
+ libc.hdr.errno_macros
libc.hdr.fenv_macros
- libc.src.__support.FPUtil.basic_operations
libc.src.math.f16div
)
@@ -4072,8 +4108,8 @@ add_fp_unittest(
HDRS
DivTest.h
DEPENDS
+ libc.hdr.errno_macros
libc.hdr.fenv_macros
- libc.src.__support.FPUtil.basic_operations
libc.src.math.f16divf
)
@@ -4086,8 +4122,8 @@ add_fp_unittest(
HDRS
DivTest.h
DEPENDS
+ libc.hdr.errno_macros
libc.hdr.fenv_macros
- libc.src.__support.FPUtil.basic_operations
libc.src.math.f16divl
)
@@ -4100,8 +4136,8 @@ add_fp_unittest(
HDRS
DivTest.h
DEPENDS
+ libc.hdr.errno_macros
libc.hdr.fenv_macros
- libc.src.__support.FPUtil.basic_operations
libc.src.math.f16divf128
)
@@ -4202,6 +4238,84 @@ add_fp_unittest(
)
add_fp_unittest(
+ fdiv_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fdiv_test.cpp
+ HDRS
+ DivTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fdiv
+)
+
+add_fp_unittest(
+ fdivl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fdivl_test.cpp
+ HDRS
+ DivTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fdivl
+)
+
+add_fp_unittest(
+ fdivf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fdivf128_test.cpp
+ HDRS
+ DivTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fdivf128
+)
+
+add_fp_unittest(
+ ffma_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ffma_test.cpp
+ HDRS
+ FmaTest.h
+ DEPENDS
+ libc.src.math.ffma
+)
+
+add_fp_unittest(
+ ffmal_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ffmal_test.cpp
+ HDRS
+ FmaTest.h
+ DEPENDS
+ libc.src.math.ffmal
+)
+
+add_fp_unittest(
+ ffmaf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ffmaf128_test.cpp
+ HDRS
+ FmaTest.h
+ DEPENDS
+ libc.src.math.ffmaf128
+)
+
+add_fp_unittest(
fsqrt_test
SUITE
libc-math-smoke-tests
@@ -4239,6 +4353,48 @@ add_fp_unittest(
)
add_fp_unittest(
+ fsub_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fsub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fsub
+)
+
+add_fp_unittest(
+ fsubl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fsubl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fsubl
+)
+
+add_fp_unittest(
+ fsubf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fsubf128_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.fsubf128
+)
+
+add_fp_unittest(
dsqrtl_test
SUITE
libc-math-smoke-tests
diff --git a/libc/test/src/math/smoke/DivTest.h b/libc/test/src/math/smoke/DivTest.h
index b30fc17..6661796 100644
--- a/libc/test/src/math/smoke/DivTest.h
+++ b/libc/test/src/math/smoke/DivTest.h
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_DIVTEST_H
#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_DIVTEST_H
+#include "hdr/errno_macros.h"
#include "hdr/fenv_macros.h"
-#include "src/__support/FPUtil/BasicOperations.h"
#include "test/UnitTest/FEnvSafeTest.h"
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/RoundingModeUtils.h"
diff --git a/libc/test/src/math/smoke/GetPayloadTest.h b/libc/test/src/math/smoke/GetPayloadTest.h
index 922a2f0..d904571 100644
--- a/libc/test/src/math/smoke/GetPayloadTest.h
+++ b/libc/test/src/math/smoke/GetPayloadTest.h
@@ -39,10 +39,10 @@ public:
EXPECT_FP_EQ(T(0.0), funcWrapper(func, neg_aNaN));
// Essentially this:
- // T default_snan_payload = StorageType(1) << (FPBits::SIG_LEN - 2);
+ // T default_snan_payload = StorageType(1) << (FPBits::FRACTION_LEN - 2);
// but supports StorageType being a BigInt.
FPBits default_snan_payload_bits = FPBits::one();
- default_snan_payload_bits.set_biased_exponent(FPBits::SIG_LEN - 2 +
+ default_snan_payload_bits.set_biased_exponent(FPBits::FRACTION_LEN - 2 +
FPBits::EXP_BIAS);
T default_snan_payload = default_snan_payload_bits.get_val();
diff --git a/libc/test/src/math/smoke/SetPayloadTest.h b/libc/test/src/math/smoke/SetPayloadTest.h
index 4b0dacf..4bea78f 100644
--- a/libc/test/src/math/smoke/SetPayloadTest.h
+++ b/libc/test/src/math/smoke/SetPayloadTest.h
@@ -33,7 +33,12 @@ public:
EXPECT_EQ(1, func(&res, T(-1.0)));
EXPECT_EQ(1, func(&res, T(0x42.1p+0)));
EXPECT_EQ(1, func(&res, T(-0x42.1p+0)));
- EXPECT_EQ(1, func(&res, T(StorageType(1) << (FPBits::FRACTION_LEN - 1))));
+
+ FPBits nan_payload_bits = FPBits::one();
+ nan_payload_bits.set_biased_exponent(FPBits::FRACTION_LEN - 1 +
+ FPBits::EXP_BIAS);
+ T nan_payload = nan_payload_bits.get_val();
+ EXPECT_EQ(1, func(&res, nan_payload));
}
void testValidPayloads(SetPayloadFunc func) {
@@ -57,7 +62,15 @@ public:
EXPECT_EQ(FPBits::quiet_nan(Sign::POS, 0x123).uintval(),
FPBits(res).uintval());
- EXPECT_EQ(0, func(&res, T(FPBits::FRACTION_MASK >> 1)));
+ // The following code is creating a NaN payload manually to prevent a
+ // conversion from BigInt to float128.
+ FPBits nan_payload_bits = FPBits::one();
+ nan_payload_bits.set_biased_exponent(FPBits::SIG_LEN - 2 +
+ FPBits::EXP_BIAS);
+ nan_payload_bits.set_mantissa(FPBits::SIG_MASK - 3);
+ T nan_payload = nan_payload_bits.get_val();
+
+ EXPECT_EQ(0, func(&res, nan_payload));
EXPECT_TRUE(FPBits(res).is_quiet_nan());
EXPECT_EQ(
FPBits::quiet_nan(Sign::POS, FPBits::FRACTION_MASK >> 1).uintval(),
diff --git a/libc/test/src/math/smoke/exp10f16_test.cpp b/libc/test/src/math/smoke/exp10f16_test.cpp
new file mode 100644
index 0000000..006dfafa
--- /dev/null
+++ b/libc/test/src/math/smoke/exp10f16_test.cpp
@@ -0,0 +1,65 @@
+//===-- Unittests for exp10f16 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/fenv_macros.h"
+#include "src/errno/libc_errno.h"
+#include "src/math/exp10f16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+using LlvmLibcExp10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+TEST_F(LlvmLibcExp10f16Test, SpecialNumbers) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::exp10f16(aNaN));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::exp10f16(sNaN), FE_INVALID);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::exp10f16(inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(zero),
+ LIBC_NAMESPACE::exp10f16(neg_inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ LIBC_NAMESPACE::exp10f16(zero));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ LIBC_NAMESPACE::exp10f16(neg_zero));
+ EXPECT_MATH_ERRNO(0);
+}
+
+TEST_F(LlvmLibcExp10f16Test, Overflow) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, LIBC_NAMESPACE::exp10f16(max_normal),
+ FE_OVERFLOW);
+ EXPECT_MATH_ERRNO(ERANGE);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ inf, LIBC_NAMESPACE::exp10f16(static_cast<float16>(5.0)), FE_OVERFLOW);
+ EXPECT_MATH_ERRNO(ERANGE);
+}
+
+TEST_F(LlvmLibcExp10f16Test, Underflow) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(zero, LIBC_NAMESPACE::exp10f16(neg_max_normal),
+ FE_UNDERFLOW | FE_INEXACT);
+ EXPECT_MATH_ERRNO(ERANGE);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ zero, LIBC_NAMESPACE::exp10f16(static_cast<float16>(-8.0)),
+ FE_UNDERFLOW | FE_INEXACT);
+ EXPECT_MATH_ERRNO(ERANGE);
+}
diff --git a/libc/test/src/math/smoke/exp2f16_test.cpp b/libc/test/src/math/smoke/exp2f16_test.cpp
new file mode 100644
index 0000000..cd87e61
--- /dev/null
+++ b/libc/test/src/math/smoke/exp2f16_test.cpp
@@ -0,0 +1,65 @@
+//===-- Unittests for exp2f16 ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/fenv_macros.h"
+#include "src/errno/libc_errno.h"
+#include "src/math/exp2f16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+using LlvmLibcExp2f16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+TEST_F(LlvmLibcExp2f16Test, SpecialNumbers) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::exp2f16(aNaN));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::exp2f16(sNaN), FE_INVALID);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::exp2f16(inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(zero),
+ LIBC_NAMESPACE::exp2f16(neg_inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ LIBC_NAMESPACE::exp2f16(zero));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ LIBC_NAMESPACE::exp2f16(neg_zero));
+ EXPECT_MATH_ERRNO(0);
+}
+
+TEST_F(LlvmLibcExp2f16Test, Overflow) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, LIBC_NAMESPACE::exp2f16(max_normal),
+ FE_OVERFLOW);
+ EXPECT_MATH_ERRNO(ERANGE);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ inf, LIBC_NAMESPACE::exp2f16(static_cast<float16>(16.0)), FE_OVERFLOW);
+ EXPECT_MATH_ERRNO(ERANGE);
+}
+
+TEST_F(LlvmLibcExp2f16Test, Underflow) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(zero, LIBC_NAMESPACE::exp2f16(neg_max_normal),
+ FE_UNDERFLOW | FE_INEXACT);
+ EXPECT_MATH_ERRNO(ERANGE);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ zero, LIBC_NAMESPACE::exp2f16(static_cast<float16>(-25.0)),
+ FE_UNDERFLOW | FE_INEXACT);
+ EXPECT_MATH_ERRNO(ERANGE);
+}
diff --git a/libc/test/src/math/smoke/fdiv_test.cpp b/libc/test/src/math/smoke/fdiv_test.cpp
new file mode 100644
index 0000000..f0bd602
--- /dev/null
+++ b/libc/test/src/math/smoke/fdiv_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fdiv ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DivTest.h"
+
+#include "src/math/fdiv.h"
+
+LIST_DIV_TESTS(float, double, LIBC_NAMESPACE::fdiv)
diff --git a/libc/test/src/math/smoke/fdivf128_test.cpp b/libc/test/src/math/smoke/fdivf128_test.cpp
new file mode 100644
index 0000000..a687e07
--- /dev/null
+++ b/libc/test/src/math/smoke/fdivf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fdivf128 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DivTest.h"
+
+#include "src/math/fdivf128.h"
+
+LIST_DIV_TESTS(float, float128, LIBC_NAMESPACE::fdivf128)
diff --git a/libc/test/src/math/smoke/fdivl_test.cpp b/libc/test/src/math/smoke/fdivl_test.cpp
new file mode 100644
index 0000000..bc2b0e4
--- /dev/null
+++ b/libc/test/src/math/smoke/fdivl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fdivl -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DivTest.h"
+
+#include "src/math/fdivl.h"
+
+LIST_DIV_TESTS(float, long double, LIBC_NAMESPACE::fdivl)
diff --git a/libc/test/src/math/smoke/ffma_test.cpp b/libc/test/src/math/smoke/ffma_test.cpp
new file mode 100644
index 0000000..4b6a25c
--- /dev/null
+++ b/libc/test/src/math/smoke/ffma_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ffma ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FmaTest.h"
+
+#include "src/math/ffma.h"
+
+LIST_NARROWING_FMA_TESTS(float, double, LIBC_NAMESPACE::ffma)
diff --git a/libc/test/src/math/smoke/ffmaf128_test.cpp b/libc/test/src/math/smoke/ffmaf128_test.cpp
new file mode 100644
index 0000000..a8f81e2
--- /dev/null
+++ b/libc/test/src/math/smoke/ffmaf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ffmaf128 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FmaTest.h"
+
+#include "src/math/ffmaf128.h"
+
+LIST_NARROWING_FMA_TESTS(float, float128, LIBC_NAMESPACE::ffmaf128)
diff --git a/libc/test/src/math/smoke/ffmal_test.cpp b/libc/test/src/math/smoke/ffmal_test.cpp
new file mode 100644
index 0000000..200d9e1
--- /dev/null
+++ b/libc/test/src/math/smoke/ffmal_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ffmal -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FmaTest.h"
+
+#include "src/math/ffmal.h"
+
+LIST_NARROWING_FMA_TESTS(float, long double, LIBC_NAMESPACE::ffmal)
diff --git a/libc/test/src/math/smoke/fsub_test.cpp b/libc/test/src/math/smoke/fsub_test.cpp
new file mode 100644
index 0000000..3c825f7
--- /dev/null
+++ b/libc/test/src/math/smoke/fsub_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fsub ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/fsub.h"
+
+LIST_SUB_TESTS(float, double, LIBC_NAMESPACE::fsub)
diff --git a/libc/test/src/math/smoke/fsubf128_test.cpp b/libc/test/src/math/smoke/fsubf128_test.cpp
new file mode 100644
index 0000000..8f46d91
--- /dev/null
+++ b/libc/test/src/math/smoke/fsubf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fsubf128 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/fsubf128.h"
+
+LIST_SUB_TESTS(float, float128, LIBC_NAMESPACE::fsubf128)
diff --git a/libc/test/src/math/smoke/fsubl_test.cpp b/libc/test/src/math/smoke/fsubl_test.cpp
new file mode 100644
index 0000000..87234326
--- /dev/null
+++ b/libc/test/src/math/smoke/fsubl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fsubl -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/math/fsubl.h"
+
+LIST_SUB_TESTS(float, long double, LIBC_NAMESPACE::fsubl)
diff --git a/libc/test/src/math/smoke/getpayloadl_test.cpp b/libc/test/src/math/smoke/getpayloadl_test.cpp
new file mode 100644
index 0000000..d783548
--- /dev/null
+++ b/libc/test/src/math/smoke/getpayloadl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for getpayloadl -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GetPayloadTest.h"
+
+#include "src/math/getpayloadl.h"
+
+LIST_GETPAYLOAD_TESTS(long double, LIBC_NAMESPACE::getpayloadl)
diff --git a/libc/test/src/time/mktime_test.cpp b/libc/test/src/time/mktime_test.cpp
index 3142a4f..84e6c7e 100644
--- a/libc/test/src/time/mktime_test.cpp
+++ b/libc/test/src/time/mktime_test.cpp
@@ -380,22 +380,115 @@ TEST(LlvmLibcMkTime, InvalidDays) {
TEST(LlvmLibcMkTime, EndOf32BitEpochYear) {
// Test for maximum value of a signed 32-bit integer.
// Test implementation can encode time for Tue 19 January 2038 03:14:07 UTC.
- struct tm tm_data {
- .tm_sec = 7, .tm_min = 14, .tm_hour = 3, .tm_mday = 19,
- .tm_mon = Month::JANUARY, .tm_year = tm_year(2038), .tm_wday = 0,
- .tm_yday = 0, .tm_isdst = 0
- };
- EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data), Succeeds(0x7FFFFFFF));
- EXPECT_TM_EQ((tm{.tm_sec = 7,
- .tm_min = 14,
- .tm_hour = 3,
- .tm_mday = 19,
- .tm_mon = Month::JANUARY,
- .tm_year = tm_year(2038),
- .tm_wday = 2,
- .tm_yday = 7,
- .tm_isdst = 0}),
- tm_data);
+ {
+ struct tm tm_data {
+ .tm_sec = 7, .tm_min = 14, .tm_hour = 3, .tm_mday = 19,
+ .tm_mon = Month::JANUARY, .tm_year = tm_year(2038), .tm_wday = 0,
+ .tm_yday = 0, .tm_isdst = 0
+ };
+ EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data), Succeeds(0x7FFFFFFF));
+ EXPECT_TM_EQ((tm{.tm_sec = 7,
+ .tm_min = 14,
+ .tm_hour = 3,
+ .tm_mday = 19,
+ .tm_mon = Month::JANUARY,
+ .tm_year = tm_year(2038),
+ .tm_wday = 2,
+ .tm_yday = 7,
+ .tm_isdst = 0}),
+ tm_data);
+ }
+
+ // Now test some times before that, to ensure they are not rejected.
+ {
+ // 2038-01-19 03:13:59 tests that even a large seconds field is
+ // accepted if the minutes field is smaller.
+ struct tm tm_data {
+ .tm_sec = 59, .tm_min = 13, .tm_hour = 3, .tm_mday = 19,
+ .tm_mon = Month::JANUARY, .tm_year = tm_year(2038), .tm_wday = 0,
+ .tm_yday = 0, .tm_isdst = 0
+ };
+ EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data), Succeeds(0x7FFFFFFF - 8));
+ EXPECT_TM_EQ((tm{.tm_sec = 59,
+ .tm_min = 13,
+ .tm_hour = 3,
+ .tm_mday = 19,
+ .tm_mon = Month::JANUARY,
+ .tm_year = tm_year(2038),
+ .tm_wday = 2,
+ .tm_yday = 7,
+ .tm_isdst = 0}),
+ tm_data);
+ }
+
+ {
+ // 2038-01-19 02:59:59 tests that large seconds and minutes are
+ // accepted if the hours field is smaller.
+ struct tm tm_data {
+ .tm_sec = 59, .tm_min = 59, .tm_hour = 2, .tm_mday = 19,
+ .tm_mon = Month::JANUARY, .tm_year = tm_year(2038), .tm_wday = 0,
+ .tm_yday = 0, .tm_isdst = 0
+ };
+ EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data),
+ Succeeds(0x7FFFFFFF - 8 - 14 * TimeConstants::SECONDS_PER_MIN));
+ EXPECT_TM_EQ((tm{.tm_sec = 59,
+ .tm_min = 59,
+ .tm_hour = 2,
+ .tm_mday = 19,
+ .tm_mon = Month::JANUARY,
+ .tm_year = tm_year(2038),
+ .tm_wday = 2,
+ .tm_yday = 7,
+ .tm_isdst = 0}),
+ tm_data);
+ }
+
+ {
+ // 2038-01-18 23:59:59 tests that large seconds, minutes and hours
+ // are accepted if the days field is smaller.
+ struct tm tm_data {
+ .tm_sec = 59, .tm_min = 59, .tm_hour = 23, .tm_mday = 18,
+ .tm_mon = Month::JANUARY, .tm_year = tm_year(2038), .tm_wday = 0,
+ .tm_yday = 0, .tm_isdst = 0
+ };
+ EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data),
+ Succeeds(0x7FFFFFFF - 8 - 14 * TimeConstants::SECONDS_PER_MIN -
+ 3 * TimeConstants::SECONDS_PER_HOUR));
+ EXPECT_TM_EQ((tm{.tm_sec = 59,
+ .tm_min = 59,
+ .tm_hour = 23,
+ .tm_mday = 18,
+ .tm_mon = Month::JANUARY,
+ .tm_year = tm_year(2038),
+ .tm_wday = 2,
+ .tm_yday = 7,
+ .tm_isdst = 0}),
+ tm_data);
+ }
+
+ {
+    // 2037-12-31 23:59:59 tests that the final second of 2037 is
+    // accepted.
+ struct tm tm_data {
+ .tm_sec = 59, .tm_min = 59, .tm_hour = 23, .tm_mday = 31,
+ .tm_mon = Month::DECEMBER, .tm_year = tm_year(2037), .tm_wday = 0,
+ .tm_yday = 0, .tm_isdst = 0
+ };
+ EXPECT_THAT(LIBC_NAMESPACE::mktime(&tm_data),
+ Succeeds(0x7FFFFFFF - 8 - 14 * TimeConstants::SECONDS_PER_MIN -
+ 3 * TimeConstants::SECONDS_PER_HOUR -
+ 18 * TimeConstants::SECONDS_PER_DAY));
+ EXPECT_TM_EQ((tm{.tm_sec = 59,
+ .tm_min = 59,
+ .tm_hour = 23,
+ .tm_mday = 31,
+ .tm_mon = Month::DECEMBER,
+ .tm_year = tm_year(2037),
+ .tm_wday = 2,
+ .tm_yday = 7,
+ .tm_isdst = 0}),
+ tm_data);
+ }
}
TEST(LlvmLibcMkTime, Max64BitYear) {
diff --git a/libc/utils/MPFRWrapper/MPFRUtils.cpp b/libc/utils/MPFRWrapper/MPFRUtils.cpp
index 4263c9d..7ce6a70 100644
--- a/libc/utils/MPFRWrapper/MPFRUtils.cpp
+++ b/libc/utils/MPFRWrapper/MPFRUtils.cpp
@@ -1081,14 +1081,18 @@ void explain_ternary_operation_one_output_error(
template void explain_ternary_operation_one_output_error(
Operation, const TernaryInput<float> &, float, double, RoundingMode);
template void explain_ternary_operation_one_output_error(
+ Operation, const TernaryInput<double> &, float, double, RoundingMode);
+template void explain_ternary_operation_one_output_error(
Operation, const TernaryInput<double> &, double, double, RoundingMode);
+template void explain_ternary_operation_one_output_error(
+ Operation, const TernaryInput<long double> &, float, double, RoundingMode);
+template void explain_ternary_operation_one_output_error(
+ Operation, const TernaryInput<long double> &, double, double, RoundingMode);
template void
explain_ternary_operation_one_output_error(Operation,
const TernaryInput<long double> &,
long double, double, RoundingMode);
-template void explain_ternary_operation_one_output_error(
- Operation, const TernaryInput<long double> &, double, double, RoundingMode);
#ifdef LIBC_TYPES_HAS_FLOAT16
template void explain_ternary_operation_one_output_error(
Operation, const TernaryInput<float> &, float16, double, RoundingMode);
@@ -1268,15 +1272,20 @@ template bool compare_ternary_operation_one_output(Operation,
float, double, RoundingMode);
template bool compare_ternary_operation_one_output(Operation,
const TernaryInput<double> &,
+ float, double, RoundingMode);
+template bool compare_ternary_operation_one_output(Operation,
+ const TernaryInput<double> &,
double, double,
RoundingMode);
+template bool compare_ternary_operation_one_output(
+ Operation, const TernaryInput<long double> &, float, double, RoundingMode);
+template bool compare_ternary_operation_one_output(
+ Operation, const TernaryInput<long double> &, double, double, RoundingMode);
template bool
compare_ternary_operation_one_output(Operation,
const TernaryInput<long double> &,
long double, double, RoundingMode);
-template bool compare_ternary_operation_one_output(
- Operation, const TernaryInput<long double> &, double, double, RoundingMode);
#ifdef LIBC_TYPES_HAS_FLOAT16
template bool compare_ternary_operation_one_output(Operation,
const TernaryInput<float> &,
diff --git a/libc/utils/gpu/loader/Loader.h b/libc/utils/gpu/loader/Loader.h
index 2151013..8722f59 100644
--- a/libc/utils/gpu/loader/Loader.h
+++ b/libc/utils/gpu/loader/Loader.h
@@ -85,7 +85,7 @@ void *copy_argument_vector(int argc, const char **argv, Allocator alloc) {
}
// Ensure the vector is null terminated.
- reinterpret_cast<void **>(dev_argv)[argv_size] = nullptr;
+ reinterpret_cast<void **>(dev_argv)[argc] = nullptr;
return dev_argv;
}
diff --git a/libc/utils/gpu/loader/amdgpu/amdhsa-loader.cpp b/libc/utils/gpu/loader/amdgpu/amdhsa-loader.cpp
index c1dcce8..1beef81 100644
--- a/libc/utils/gpu/loader/amdgpu/amdhsa-loader.cpp
+++ b/libc/utils/gpu/loader/amdgpu/amdhsa-loader.cpp
@@ -180,7 +180,7 @@ hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
if (hsa_status_t err =
hsa_amd_memory_pool_allocate(pool, size,
/*flags=*/0, &dev_ptr))
- handle_error(err);
+      handle_error(err);
hsa_amd_agents_allow_access(1, &dev_agent, nullptr, dev_ptr);
buffer->data[0] = reinterpret_cast<uintptr_t>(dev_ptr);
};
diff --git a/libc/utils/gpu/loader/nvptx/nvptx-loader.cpp b/libc/utils/gpu/loader/nvptx/nvptx-loader.cpp
index 9fd3de2..1b210b9 100644
--- a/libc/utils/gpu/loader/nvptx/nvptx-loader.cpp
+++ b/libc/utils/gpu/loader/nvptx/nvptx-loader.cpp
@@ -198,7 +198,7 @@ CUresult launch_kernel(CUmodule binary, CUstream stream,
uint64_t size = buffer->data[0];
CUdeviceptr dev_ptr;
if (CUresult err = cuMemAllocAsync(&dev_ptr, size, memory_stream))
- handle_error(err);
+    handle_error(err);
// Wait until the memory allocation is complete.
while (cuStreamQuery(memory_stream) == CUDA_ERROR_NOT_READY)
diff --git a/libc/utils/gpu/server/CMakeLists.txt b/libc/utils/gpu/server/CMakeLists.txt
index 94347ef..13509dc 100644
--- a/libc/utils/gpu/server/CMakeLists.txt
+++ b/libc/utils/gpu/server/CMakeLists.txt
@@ -14,6 +14,8 @@ target_compile_options(llvmlibc_rpc_server PUBLIC
$<$<CXX_COMPILER_ID:GNU>:-Wno-attributes>)
target_compile_definitions(llvmlibc_rpc_server PUBLIC
LIBC_COPT_USE_C_ASSERT
+ LIBC_COPT_MEMCPY_USE_EMBEDDED_TINY
+ LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64
LIBC_COPT_ARRAY_ARG_LIST
LIBC_COPT_PRINTF_DISABLE_WRITE_INT
LIBC_COPT_PRINTF_DISABLE_INDEX_MODE
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index ed23d22..0d4d1ad 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -11,6 +11,9 @@
#define __has_builtin(x) 0
#endif
+// Make sure these are included first so they don't conflict with the system.
+#include <limits.h>
+
#include "llvmlibc_rpc_server.h"
#include "src/__support/RPC/rpc.h"
diff --git a/libcxx/docs/Status/Cxx20Issues.csv b/libcxx/docs/Status/Cxx20Issues.csv
index 0053ca6..340f004 100644
--- a/libcxx/docs/Status/Cxx20Issues.csv
+++ b/libcxx/docs/Status/Cxx20Issues.csv
@@ -117,7 +117,7 @@
"`LWG3127 <https://wg21.link/LWG3127>`__","``basic_osyncstream::rdbuf``\ needs a ``const_cast``\ ","San Diego","|Complete|","18.0",""
"`LWG3128 <https://wg21.link/LWG3128>`__","``strstream::rdbuf``\ needs a ``const_cast``\ ","San Diego","|Nothing To Do|","",""
"`LWG3129 <https://wg21.link/LWG3129>`__","``regex_token_iterator``\ constructor uses wrong pointer arithmetic","San Diego","","",""
-"`LWG3130 <https://wg21.link/LWG3130>`__","|sect|\ [input.output] needs many ``addressof``\ ","San Diego","","",""
+"`LWG3130 <https://wg21.link/LWG3130>`__","|sect|\ [input.output] needs many ``addressof``\ ","San Diego","|Complete|","20.0",""
"`LWG3131 <https://wg21.link/LWG3131>`__","``addressof``\ all the things","San Diego","","",""
"`LWG3132 <https://wg21.link/LWG3132>`__","Library needs to ban macros named ``expects``\ or ``ensures``\ ","San Diego","|Nothing To Do|","",""
"`LWG3134 <https://wg21.link/LWG3134>`__","[fund.ts.v3] LFTSv3 contains extraneous [meta] variable templates that should have been deleted by P09961","San Diego","Resolved by `P1210R0 <https://wg21.link/P1210R0>`__","",""
diff --git a/libcxx/include/fstream b/libcxx/include/fstream
index ab5ebf8..a77b7ce 100644
--- a/libcxx/include/fstream
+++ b/libcxx/include/fstream
@@ -191,6 +191,7 @@ typedef basic_fstream<wchar_t> wfstream;
#include <__config>
#include <__fwd/fstream.h>
#include <__locale>
+#include <__memory/addressof.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/is_same.h>
#include <__utility/move.h>
@@ -1136,11 +1137,12 @@ private:
};
template <class _CharT, class _Traits>
-inline basic_ifstream<_CharT, _Traits>::basic_ifstream() : basic_istream<char_type, traits_type>(&__sb_) {}
+inline basic_ifstream<_CharT, _Traits>::basic_ifstream()
+ : basic_istream<char_type, traits_type>(std::addressof(__sb_)) {}
template <class _CharT, class _Traits>
inline basic_ifstream<_CharT, _Traits>::basic_ifstream(const char* __s, ios_base::openmode __mode)
- : basic_istream<char_type, traits_type>(&__sb_) {
+ : basic_istream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::in) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1148,15 +1150,16 @@ inline basic_ifstream<_CharT, _Traits>::basic_ifstream(const char* __s, ios_base
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
template <class _CharT, class _Traits>
inline basic_ifstream<_CharT, _Traits>::basic_ifstream(const wchar_t* __s, ios_base::openmode __mode)
- : basic_istream<char_type, traits_type>(&__sb_) {
+ : basic_istream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::in) == nullptr)
this->setstate(ios_base::failbit);
}
# endif
+// extension
template <class _CharT, class _Traits>
inline basic_ifstream<_CharT, _Traits>::basic_ifstream(const string& __s, ios_base::openmode __mode)
- : basic_istream<char_type, traits_type>(&__sb_) {
+ : basic_istream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::in) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1164,7 +1167,7 @@ inline basic_ifstream<_CharT, _Traits>::basic_ifstream(const string& __s, ios_ba
template <class _CharT, class _Traits>
inline basic_ifstream<_CharT, _Traits>::basic_ifstream(basic_ifstream&& __rhs)
: basic_istream<char_type, traits_type>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- this->set_rdbuf(&__sb_);
+ this->set_rdbuf(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
@@ -1187,7 +1190,7 @@ inline _LIBCPP_HIDE_FROM_ABI void swap(basic_ifstream<_CharT, _Traits>& __x, bas
template <class _CharT, class _Traits>
inline basic_filebuf<_CharT, _Traits>* basic_ifstream<_CharT, _Traits>::rdbuf() const {
- return const_cast<basic_filebuf<char_type, traits_type>*>(&__sb_);
+ return const_cast<basic_filebuf<char_type, traits_type>*>(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
@@ -1293,11 +1296,12 @@ private:
};
template <class _CharT, class _Traits>
-inline basic_ofstream<_CharT, _Traits>::basic_ofstream() : basic_ostream<char_type, traits_type>(&__sb_) {}
+inline basic_ofstream<_CharT, _Traits>::basic_ofstream()
+ : basic_ostream<char_type, traits_type>(std::addressof(__sb_)) {}
template <class _CharT, class _Traits>
inline basic_ofstream<_CharT, _Traits>::basic_ofstream(const char* __s, ios_base::openmode __mode)
- : basic_ostream<char_type, traits_type>(&__sb_) {
+ : basic_ostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::out) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1305,15 +1309,16 @@ inline basic_ofstream<_CharT, _Traits>::basic_ofstream(const char* __s, ios_base
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
template <class _CharT, class _Traits>
inline basic_ofstream<_CharT, _Traits>::basic_ofstream(const wchar_t* __s, ios_base::openmode __mode)
- : basic_ostream<char_type, traits_type>(&__sb_) {
+ : basic_ostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::out) == nullptr)
this->setstate(ios_base::failbit);
}
# endif
+// extension
template <class _CharT, class _Traits>
inline basic_ofstream<_CharT, _Traits>::basic_ofstream(const string& __s, ios_base::openmode __mode)
- : basic_ostream<char_type, traits_type>(&__sb_) {
+ : basic_ostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode | ios_base::out) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1321,7 +1326,7 @@ inline basic_ofstream<_CharT, _Traits>::basic_ofstream(const string& __s, ios_ba
template <class _CharT, class _Traits>
inline basic_ofstream<_CharT, _Traits>::basic_ofstream(basic_ofstream&& __rhs)
: basic_ostream<char_type, traits_type>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- this->set_rdbuf(&__sb_);
+ this->set_rdbuf(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
@@ -1344,7 +1349,7 @@ inline _LIBCPP_HIDE_FROM_ABI void swap(basic_ofstream<_CharT, _Traits>& __x, bas
template <class _CharT, class _Traits>
inline basic_filebuf<_CharT, _Traits>* basic_ofstream<_CharT, _Traits>::rdbuf() const {
- return const_cast<basic_filebuf<char_type, traits_type>*>(&__sb_);
+ return const_cast<basic_filebuf<char_type, traits_type>*>(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
@@ -1454,11 +1459,12 @@ private:
};
template <class _CharT, class _Traits>
-inline basic_fstream<_CharT, _Traits>::basic_fstream() : basic_iostream<char_type, traits_type>(&__sb_) {}
+inline basic_fstream<_CharT, _Traits>::basic_fstream()
+ : basic_iostream<char_type, traits_type>(std::addressof(__sb_)) {}
template <class _CharT, class _Traits>
inline basic_fstream<_CharT, _Traits>::basic_fstream(const char* __s, ios_base::openmode __mode)
- : basic_iostream<char_type, traits_type>(&__sb_) {
+ : basic_iostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1466,7 +1472,7 @@ inline basic_fstream<_CharT, _Traits>::basic_fstream(const char* __s, ios_base::
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
template <class _CharT, class _Traits>
inline basic_fstream<_CharT, _Traits>::basic_fstream(const wchar_t* __s, ios_base::openmode __mode)
- : basic_iostream<char_type, traits_type>(&__sb_) {
+ : basic_iostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode) == nullptr)
this->setstate(ios_base::failbit);
}
@@ -1474,15 +1480,16 @@ inline basic_fstream<_CharT, _Traits>::basic_fstream(const wchar_t* __s, ios_bas
template <class _CharT, class _Traits>
inline basic_fstream<_CharT, _Traits>::basic_fstream(const string& __s, ios_base::openmode __mode)
- : basic_iostream<char_type, traits_type>(&__sb_) {
+ : basic_iostream<char_type, traits_type>(std::addressof(__sb_)) {
if (__sb_.open(__s, __mode) == nullptr)
this->setstate(ios_base::failbit);
}
+// extension
template <class _CharT, class _Traits>
inline basic_fstream<_CharT, _Traits>::basic_fstream(basic_fstream&& __rhs)
: basic_iostream<char_type, traits_type>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- this->set_rdbuf(&__sb_);
+ this->set_rdbuf(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
@@ -1505,7 +1512,7 @@ inline _LIBCPP_HIDE_FROM_ABI void swap(basic_fstream<_CharT, _Traits>& __x, basi
template <class _CharT, class _Traits>
inline basic_filebuf<_CharT, _Traits>* basic_fstream<_CharT, _Traits>::rdbuf() const {
- return const_cast<basic_filebuf<char_type, traits_type>*>(&__sb_);
+ return const_cast<basic_filebuf<char_type, traits_type>*>(std::addressof(__sb_));
}
template <class _CharT, class _Traits>
diff --git a/libcxx/include/ios b/libcxx/include/ios
index d8a3643..426838b 100644
--- a/libcxx/include/ios
+++ b/libcxx/include/ios
@@ -218,6 +218,7 @@ storage-class-specifier const error_category& iostream_category() noexcept;
# include <__fwd/ios.h>
# include <__ios/fpos.h>
# include <__locale>
+# include <__memory/addressof.h>
# include <__system_error/error_category.h>
# include <__system_error/error_code.h>
# include <__system_error/error_condition.h>
@@ -621,11 +622,11 @@ protected:
private:
basic_ostream<char_type, traits_type>* __tie_;
-#if defined(_LIBCPP_ABI_IOS_ALLOW_ARBITRARY_FILL_VALUE)
+# if defined(_LIBCPP_ABI_IOS_ALLOW_ARBITRARY_FILL_VALUE)
using _FillType = _FillHelper<traits_type>;
-#else
+# else
using _FillType = _SentinelValueFill<traits_type>;
-#endif
+# endif
mutable _FillType __fill_;
};
@@ -640,7 +641,7 @@ basic_ios<_CharT, _Traits>::~basic_ios() {}
template <class _CharT, class _Traits>
inline _LIBCPP_HIDE_FROM_ABI void basic_ios<_CharT, _Traits>::init(basic_streambuf<char_type, traits_type>* __sb) {
ios_base::init(__sb);
- __tie_ = nullptr;
+ __tie_ = nullptr;
__fill_.__init();
}
@@ -707,7 +708,7 @@ inline _LIBCPP_HIDE_FROM_ABI _CharT basic_ios<_CharT, _Traits>::fill(char_type _
template <class _CharT, class _Traits>
basic_ios<_CharT, _Traits>& basic_ios<_CharT, _Traits>::copyfmt(const basic_ios& __rhs) {
- if (this != &__rhs) {
+ if (this != std::addressof(__rhs)) {
__call_callbacks(erase_event);
ios_base::copyfmt(__rhs);
__tie_ = __rhs.__tie_;
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index 272d886..78a7f2d 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -872,13 +872,14 @@ private:
public:
// [istringstream.cons] Constructors:
- _LIBCPP_HIDE_FROM_ABI basic_istringstream() : basic_istream<_CharT, _Traits>(&__sb_), __sb_(ios_base::in) {}
+ _LIBCPP_HIDE_FROM_ABI basic_istringstream()
+ : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(ios_base::in) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_istringstream(ios_base::openmode __wch)
- : basic_istream<_CharT, _Traits>(&__sb_), __sb_(__wch | ios_base::in) {}
+ : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch | ios_base::in) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_istringstream(const string_type& __s, ios_base::openmode __wch = ios_base::in)
- : basic_istream<_CharT, _Traits>(&__sb_), __sb_(__s, __wch | ios_base::in) {}
+ : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::in) {}
#if _LIBCPP_STD_VER >= 20
_LIBCPP_HIDE_FROM_ABI basic_istringstream(ios_base::openmode __wch, const _Allocator& __a)
@@ -924,7 +925,7 @@ public:
basic_istringstream(const basic_istringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_istringstream(basic_istringstream&& __rhs)
: basic_istream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- basic_istream<_CharT, _Traits>::set_rdbuf(&__sb_);
+ basic_istream<_CharT, _Traits>::set_rdbuf(std::addressof(__sb_));
}
// [istringstream.assign] Assign and swap:
@@ -941,7 +942,7 @@ public:
// [istringstream.members] Member functions:
_LIBCPP_HIDE_FROM_ABI basic_stringbuf<char_type, traits_type, allocator_type>* rdbuf() const {
- return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(&__sb_);
+ return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
}
#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
@@ -1007,13 +1008,14 @@ private:
public:
// [ostringstream.cons] Constructors:
- _LIBCPP_HIDE_FROM_ABI basic_ostringstream() : basic_ostream<_CharT, _Traits>(&__sb_), __sb_(ios_base::out) {}
+ _LIBCPP_HIDE_FROM_ABI basic_ostringstream()
+ : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(ios_base::out) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_ostringstream(ios_base::openmode __wch)
- : basic_ostream<_CharT, _Traits>(&__sb_), __sb_(__wch | ios_base::out) {}
+ : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch | ios_base::out) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_ostringstream(const string_type& __s, ios_base::openmode __wch = ios_base::out)
- : basic_ostream<_CharT, _Traits>(&__sb_), __sb_(__s, __wch | ios_base::out) {}
+ : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::out) {}
#if _LIBCPP_STD_VER >= 20
_LIBCPP_HIDE_FROM_ABI basic_ostringstream(ios_base::openmode __wch, const _Allocator& __a)
@@ -1060,7 +1062,7 @@ public:
basic_ostringstream(const basic_ostringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_ostringstream(basic_ostringstream&& __rhs)
: basic_ostream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- basic_ostream<_CharT, _Traits>::set_rdbuf(&__sb_);
+ basic_ostream<_CharT, _Traits>::set_rdbuf(std::addressof(__sb_));
}
// [ostringstream.assign] Assign and swap:
@@ -1078,7 +1080,7 @@ public:
// [ostringstream.members] Member functions:
_LIBCPP_HIDE_FROM_ABI basic_stringbuf<char_type, traits_type, allocator_type>* rdbuf() const {
- return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(&__sb_);
+ return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
}
#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
@@ -1145,14 +1147,14 @@ private:
public:
// [stringstream.cons] constructors
_LIBCPP_HIDE_FROM_ABI basic_stringstream()
- : basic_iostream<_CharT, _Traits>(&__sb_), __sb_(ios_base::in | ios_base::out) {}
+ : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(ios_base::in | ios_base::out) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_stringstream(ios_base::openmode __wch)
- : basic_iostream<_CharT, _Traits>(&__sb_), __sb_(__wch) {}
+ : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch) {}
_LIBCPP_HIDE_FROM_ABI explicit basic_stringstream(const string_type& __s,
ios_base::openmode __wch = ios_base::in | ios_base::out)
- : basic_iostream<_CharT, _Traits>(&__sb_), __sb_(__s, __wch) {}
+ : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch) {}
#if _LIBCPP_STD_VER >= 20
_LIBCPP_HIDE_FROM_ABI basic_stringstream(ios_base::openmode __wch, const _Allocator& __a)
@@ -1201,7 +1203,7 @@ public:
basic_stringstream(const basic_stringstream&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_stringstream(basic_stringstream&& __rhs)
: basic_iostream<_CharT, _Traits>(std::move(__rhs)), __sb_(std::move(__rhs.__sb_)) {
- basic_istream<_CharT, _Traits>::set_rdbuf(&__sb_);
+ basic_istream<_CharT, _Traits>::set_rdbuf(std::addressof(__sb_));
}
// [stringstream.assign] Assign and swap:
@@ -1218,7 +1220,7 @@ public:
// [stringstream.members] Member functions:
_LIBCPP_HIDE_FROM_ABI basic_stringbuf<char_type, traits_type, allocator_type>* rdbuf() const {
- return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(&__sb_);
+ return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
}
#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
diff --git a/libcxx/include/string b/libcxx/include/string
index 7b0cd82..6e93a62 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -1048,7 +1048,7 @@ public:
__r_.first() = __str.__r_.first();
__str.__r_.first() = __rep();
__str.__annotate_new(0);
- if (!__is_long() && this != &__str)
+ if (!__is_long() && this != std::addressof(__str))
__annotate_new(size());
}
}
@@ -2711,7 +2711,7 @@ basic_string<_CharT, _Traits, _Allocator>::__move_assign(basic_string& __str, tr
__str.__set_short_size(0);
traits_type::assign(__str.__get_short_pointer()[0], value_type());
- if (__str_was_short && this != &__str)
+ if (__str_was_short && this != std::addressof(__str))
__str.__annotate_shrink(__str_old_size);
else
// ASan annotations: was long, so object memory is unpoisoned as new.
@@ -2725,7 +2725,7 @@ basic_string<_CharT, _Traits, _Allocator>::__move_assign(basic_string& __str, tr
// invariants hold (so functions without preconditions, such as the assignment operator,
// can be safely used on the object after it was moved from):"
// Quote: "v = std::move(v); // the value of v is unspecified"
- if (!__is_long() && &__str != this)
+ if (!__is_long() && std::addressof(__str) != this)
// If it is long string, delete was never called on original __str's buffer.
__annotate_new(__get_short_size());
}
@@ -3450,13 +3450,13 @@ inline _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocat
"swapping non-equal allocators");
if (!__is_long())
__annotate_delete();
- if (this != &__str && !__str.__is_long())
+ if (this != std::addressof(__str) && !__str.__is_long())
__str.__annotate_delete();
std::swap(__r_.first(), __str.__r_.first());
std::__swap_allocator(__alloc(), __str.__alloc());
if (!__is_long())
__annotate_new(__get_short_size());
- if (this != &__str && !__str.__is_long())
+ if (this != std::addressof(__str) && !__str.__is_long())
__str.__annotate_new(__str.__get_short_size());
}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/default.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/default.pass.cpp
index 5749de2..d15276b 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/default.pass.cpp
@@ -14,19 +14,25 @@
// basic_fstream();
#include <fstream>
-#include <type_traits>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::fstream fs;
}
+ {
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fs;
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wfstream fs;
}
+ {
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs;
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/move.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/move.pass.cpp
index b282e7f..95a04bd 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/move.pass.cpp
@@ -15,8 +15,10 @@
#include <fstream>
#include <cassert>
+
#include "test_macros.h"
#include "platform_support.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -33,6 +35,18 @@ int main(int, char**)
}
std::remove(temp.c_str());
+ {
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fso(
+ temp, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fs = std::move(fso);
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs << "3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == "3.25");
+ }
+ std::remove(temp.c_str());
+
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wfstream fso(temp, std::ios_base::in | std::ios_base::out
@@ -45,6 +59,18 @@ int main(int, char**)
assert(x == 3.25);
}
std::remove(temp.c_str());
+
+ {
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fso(
+ temp, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs = std::move(fso);
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs << L"3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == L"3.25");
+ }
+ std::remove(temp.c_str());
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/path.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/path.pass.cpp
index d6bb56d..d5a2a8f 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/path.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/path.pass.cpp
@@ -27,6 +27,7 @@
#include "test_macros.h"
#include "test_iterators.h"
#include "platform_support.h"
+#include "operator_hijacker.h"
namespace fs = std::filesystem;
@@ -72,6 +73,17 @@ int main(int, char**) {
}
std::remove(p.string().c_str());
+ {
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fs(
+ p, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs << "3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == "3.25");
+ }
+ std::remove(p.string().c_str());
+
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wfstream fs(p, std::ios_base::in | std::ios_base::out |
@@ -83,6 +95,18 @@ int main(int, char**) {
assert(x == 3.25);
}
std::remove(p.string().c_str());
+
+ {
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(
+ p, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs << L"3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == L"3.25");
+ }
+ std::remove(p.string().c_str());
+
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/pointer.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/pointer.pass.cpp
index 18b22d6..df7d3b9 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/pointer.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/pointer.pass.cpp
@@ -17,8 +17,10 @@
#include <fstream>
#include <cassert>
+
#include "test_macros.h"
#include "platform_support.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -34,6 +36,17 @@ int main(int, char**)
}
std::remove(temp.c_str());
+ {
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fs(
+ temp.c_str(), std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs << "3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == "3.25");
+ }
+ std::remove(temp.c_str());
+
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wfstream fs(temp.c_str(), std::ios_base::in | std::ios_base::out
@@ -45,6 +58,17 @@ int main(int, char**)
assert(x == 3.25);
}
std::remove(temp.c_str());
+
+ {
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(
+ temp.c_str(), std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs << L"3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == L"3.25");
+ }
+ std::remove(temp.c_str());
#endif
#if TEST_STD_VER >= 23
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/string.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/string.pass.cpp
index 80b3fe4..ca0921a 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.cons/string.pass.cpp
@@ -15,8 +15,10 @@
#include <fstream>
#include <cassert>
+
#include "test_macros.h"
#include "platform_support.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -33,6 +35,17 @@ int main(int, char**)
}
std::remove(temp.c_str());
+ {
+ std::basic_fstream<char, operator_hijacker_char_traits<char> > fs(
+ temp, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs << "3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == "3.25");
+ }
+ std::remove(temp.c_str());
+
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wfstream fs(temp,
@@ -45,6 +58,17 @@ int main(int, char**)
assert(x == 3.25);
}
std::remove(temp.c_str());
+
+ {
+ std::basic_fstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(
+ temp, std::ios_base::in | std::ios_base::out | std::ios_base::trunc);
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs << L"3.25";
+ fs.seekg(0);
+ fs >> x;
+ assert(x == L"3.25");
+ }
+ std::remove(temp.c_str());
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/default.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/default.pass.cpp
index 043db15..70d1efc 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/default.pass.cpp
@@ -14,19 +14,25 @@
// basic_ifstream();
#include <fstream>
-#include <type_traits>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::ifstream fs;
}
+ {
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fs;
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wifstream fs;
}
+ {
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs;
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/move.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/move.pass.cpp
index f21943a..81ec800 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/move.pass.cpp
@@ -17,8 +17,10 @@
#include <fstream>
#include <cassert>
+#include <ios>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -29,6 +31,13 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fso("test.dat");
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fs = std::move(fso);
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs >> x;
+ assert(x == "3.25");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wifstream fso("test.dat");
@@ -37,6 +46,13 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fso("test.dat");
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs = std::move(fso);
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs >> x;
+ assert(x == L"3.25");
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/path.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/path.pass.cpp
index 792b656..630aac1 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/path.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/path.pass.cpp
@@ -28,6 +28,7 @@
#include "test_macros.h"
#include "test_iterators.h"
+#include "operator_hijacker.h"
namespace fs = std::filesystem;
@@ -75,6 +76,12 @@ int main(int, char**) {
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fs(fs::path("test.dat"));
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs >> x;
+ assert(x == "3.25");
+ }
// std::ifstream(const fs::path&, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
// which creates writable files.
@@ -86,6 +93,12 @@ int main(int, char**) {
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(fs::path("test.dat"));
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs >> x;
+ assert(x == L"3.25");
+ }
// std::wifstream(const fs::path&, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
// which creates writable files.
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/pointer.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/pointer.pass.cpp
index bd08804..6bbe6f1 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/pointer.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/pointer.pass.cpp
@@ -19,6 +19,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -28,6 +29,12 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fs("test.dat");
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs >> x;
+ assert(x == "3.25");
+ }
// std::ifstream(const char*, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp
// which creates writable files.
@@ -39,6 +46,12 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs("test.dat");
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs >> x;
+ assert(x == L"3.25");
+ }
// std::wifstream(const char*, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp
// which creates writable files.
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/string.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/string.pass.cpp
index ae7976b..e1a9b53 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.cons/string.pass.cpp
@@ -19,6 +19,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -28,6 +29,12 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<char, operator_hijacker_char_traits<char> > fs(std::string("test.dat"));
+ std::basic_string<char, operator_hijacker_char_traits<char> > x;
+ fs >> x;
+ assert(x == "3.25");
+ }
// std::ifstream(const std::string&, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
// which creates writable files.
@@ -39,6 +46,12 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ {
+ std::basic_ifstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(std::string("test.dat"));
+ std::basic_string<wchar_t, operator_hijacker_char_traits<wchar_t> > x;
+ fs >> x;
+ assert(x == L"3.25");
+ }
// std::wifstream(const std::string&, std::ios_base::openmode) is tested in
// test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
// which creates writable files.
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/default.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/default.pass.cpp
index 3470133..a7b0918 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/default.pass.cpp
@@ -14,19 +14,25 @@
// basic_ofstream();
#include <fstream>
-#include <type_traits>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::ofstream fs;
}
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs;
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wofstream fs;
}
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs;
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/move.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/move.pass.cpp
index a06ad43..ec02fa2 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/move.pass.cpp
@@ -15,8 +15,10 @@
#include <fstream>
#include <cassert>
+
#include "test_macros.h"
#include "platform_support.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -34,6 +36,19 @@ int main(int, char**)
}
std::remove(temp.c_str());
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fso(temp.c_str());
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs = std::move(fso);
+ fs << "3.25";
+ }
+ {
+ std::ifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wofstream fso(temp.c_str());
@@ -47,6 +62,19 @@ int main(int, char**)
assert(x == 3.25);
}
std::remove(temp.c_str());
+
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fso(temp.c_str());
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs = std::move(fso);
+ fs << L"3.25";
+ }
+ {
+ std::wifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
#endif
return 0;
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/path.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/path.pass.cpp
index 602bdad..c62c13d 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/path.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/path.pass.cpp
@@ -16,7 +16,7 @@
// class basic_ofstream
// template<class T>
-// explicit basic_ifstream(const T& s, ios_base::openmode mode = ios_base::in); // Since C++17
+// explicit basic_ofstream(const T& s, ios_base::openmode mode = ios_base::out); // Since C++17
// Constraints: is_same_v<T, filesystem::path> is true
#include <cassert>
@@ -25,6 +25,7 @@
#include <type_traits>
#include "platform_support.h"
+#include "operator_hijacker.h"
#include "test_macros.h"
#include "test_iterators.h"
@@ -60,7 +61,6 @@ static_assert(test_non_convert_to_path<char16_t>());
static_assert(test_non_convert_to_path<char32_t>());
int main(int, char**) {
- fs::path p = get_temp_file_name();
{
static_assert(!std::is_convertible<fs::path, std::ofstream>::value,
"ctor should be explicit");
@@ -68,6 +68,7 @@ int main(int, char**) {
std::ios_base::openmode>::value,
"");
}
+ fs::path p = get_temp_file_name();
{
std::ofstream stream(p);
stream << 3.25;
@@ -78,8 +79,38 @@ int main(int, char**) {
stream >> x;
assert(x == 3.25);
}
+ std::remove(p.string().c_str());
+
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > stream(p);
+ stream << "3.25";
+ }
+ {
+ std::ifstream stream(p);
+ double x = 0;
+ stream >> x;
+ assert(x == 3.25);
+ }
+ std::remove(p.string().c_str());
+
+ {
+ std::ofstream stream(p, std::ios_base::out);
+ stream << 3.25;
+ }
+ {
+ std::ifstream stream(p);
+ double x = 0;
+ stream >> x;
+ assert(x == 3.25);
+ }
+ std::remove(p.string().c_str());
+
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > stream(p, std::ios_base::out);
+ stream << "3.25";
+ }
{
- std::ifstream stream(p, std::ios_base::out);
+ std::ifstream stream(p);
double x = 0;
stream >> x;
assert(x == 3.25);
@@ -97,8 +128,38 @@ int main(int, char**) {
stream >> x;
assert(x == 3.25);
}
+ std::remove(p.string().c_str());
+
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > stream(p);
+ stream << L"3.25";
+ }
+ {
+ std::wifstream stream(p);
+ double x = 0;
+ stream >> x;
+ assert(x == 3.25);
+ }
+ std::remove(p.string().c_str());
+
{
- std::wifstream stream(p, std::ios_base::out);
+ std::wofstream stream(p, std::ios_base::out);
+ stream << 3.25;
+ }
+ {
+ std::wifstream stream(p);
+ double x = 0;
+ stream >> x;
+ assert(x == 3.25);
+ }
+ std::remove(p.string().c_str());
+
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > stream(p, std::ios_base::out);
+ stream << L"3.25";
+ }
+ {
+ std::wifstream stream(p);
double x = 0;
stream >> x;
assert(x == 3.25);
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp
index 23bd07a..af43ffd 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/pointer.pass.cpp
@@ -17,12 +17,16 @@
#include <fstream>
#include <cassert>
+#include <ios>
+
#include "test_macros.h"
+#include "operator_hijacker.h"
#include "platform_support.h"
int main(int, char**)
{
std::string temp = get_temp_file_name();
+
{
std::ofstream fs(temp.c_str());
fs << 3.25;
@@ -33,14 +37,43 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ std::remove(temp.c_str());
+
{
- std::ifstream fs(temp.c_str(), std::ios_base::out);
- double x = 0;
- fs >> x;
- assert(x == 3.25);
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs(temp.c_str());
+ fs << "3.25";
+ }
+ {
+ std::ifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::ofstream fs(temp.c_str(), std::ios_base::out);
+ fs << 3.25;
+ }
+ {
+ std::ifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
}
std::remove(temp.c_str());
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs(temp.c_str(), std::ios_base::out);
+ fs << "3.25";
+ }
+ {
+ std::ifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wofstream fs(temp.c_str());
@@ -52,13 +85,44 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ std::remove(temp.c_str());
+
{
- std::wifstream fs(temp.c_str(), std::ios_base::out);
- double x = 0;
- fs >> x;
- assert(x == 3.25);
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(temp.c_str());
+ fs << L"3.25";
+ }
+ {
+ std::wifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::wofstream fs(temp.c_str(), std::ios_base::out);
+ fs << 3.25;
+ }
+ {
+ std::wifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(temp.c_str(), std::ios_base::out);
+ fs << L"3.25";
+ }
+ {
+ std::wifstream fs(temp.c_str());
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
}
std::remove(temp.c_str());
+
#endif
#if TEST_STD_VER >= 23
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
index 4c0823e..33a7e9b 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.cons/string.pass.cpp
@@ -15,12 +15,16 @@
#include <fstream>
#include <cassert>
+#include <ios>
+
#include "test_macros.h"
+#include "operator_hijacker.h"
#include "platform_support.h"
int main(int, char**)
{
std::string temp = get_temp_file_name();
+
{
std::ofstream fs(temp);
fs << 3.25;
@@ -31,14 +35,43 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ std::remove(temp.c_str());
+
{
- std::ifstream fs(temp, std::ios_base::out);
- double x = 0;
- fs >> x;
- assert(x == 3.25);
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs(temp);
+ fs << "3.25";
+ }
+ {
+ std::ifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::ofstream fs(temp, std::ios_base::out);
+ fs << 3.25;
+ }
+ {
+ std::ifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
}
std::remove(temp.c_str());
+ {
+ std::basic_ofstream<char, operator_hijacker_char_traits<char> > fs(temp, std::ios_base::out);
+ fs << "3.25";
+ }
+ {
+ std::ifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wofstream fs(temp);
@@ -50,11 +83,41 @@ int main(int, char**)
fs >> x;
assert(x == 3.25);
}
+ std::remove(temp.c_str());
+
{
- std::wifstream fs(temp, std::ios_base::out);
- double x = 0;
- fs >> x;
- assert(x == 3.25);
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(temp);
+ fs << L"3.25";
+ }
+ {
+ std::wifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::wofstream fs(temp, std::ios_base::out);
+ fs << 3.25;
+ }
+ {
+ std::wifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
+ }
+ std::remove(temp.c_str());
+
+ {
+ std::basic_ofstream<wchar_t, operator_hijacker_char_traits<wchar_t> > fs(temp, std::ios_base::out);
+ fs << L"3.25";
+ }
+ {
+ std::wifstream fs(temp);
+ double x = 0;
+ fs >> x;
+ assert(x == 3.25);
}
std::remove(temp.c_str());
#endif
diff --git a/libcxx/test/std/input.output/iostreams.base/ios/basic.ios.members/copyfmt.pass.cpp b/libcxx/test/std/input.output/iostreams.base/ios/basic.ios.members/copyfmt.pass.cpp
index 949c87e..d78f7df 100644
--- a/libcxx/test/std/input.output/iostreams.base/ios/basic.ios.members/copyfmt.pass.cpp
+++ b/libcxx/test/std/input.output/iostreams.base/ios/basic.ios.members/copyfmt.pass.cpp
@@ -16,12 +16,15 @@
// basic_ios& copyfmt(const basic_ios& rhs);
#include <ios>
+#include <memory>
#include <streambuf>
+#include <sstream>
#include <cassert>
#include "platform_support.h" // locale name macros
#include "test_macros.h"
+#include "operator_hijacker.h"
struct testbuf
: public std::streambuf
@@ -191,5 +194,11 @@ int main(int, char**)
assert(ios1.fill() == '2');
#endif
+ {
+ std::basic_stringbuf<char, operator_hijacker_char_traits<char> > sb;
+ std::basic_ios<char, operator_hijacker_char_traits<char> > ios(std::addressof(sb));
+ ios.copyfmt(ios);
+ }
+
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/default.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/default.pass.cpp
index 2a90d5d..8c73df4 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/default.pass.cpp
@@ -19,6 +19,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
#if TEST_STD_VER >= 11
#include "test_convertible.h"
@@ -33,29 +34,54 @@ int main(int, char**)
{
{
std::istringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
{
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss;
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
+ {
std::istringstream ss(std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
+ {
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::ios_base::in);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wistringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
{
std::wistringstream ss(std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
+ {
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss;
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L"");
+ }
+ {
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ std::ios_base::in);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L"");
+ }
#endif
#if TEST_STD_VER >= 11
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/mode.alloc.pass.cpp
index 26dfc4b..e2bbd92 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/mode.alloc.pass.cpp
@@ -20,19 +20,22 @@
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
-template <class CharT>
-static void test() {
- const test_allocator<CharT> a(2);
- const std::basic_istringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(std::ios_base::binary, a);
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, Allocator> ss(std::ios_base::binary, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view().empty());
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
+
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/move.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/move.pass.cpp
index 45ad2d2..00ac7cc 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/move.pass.cpp
@@ -17,6 +17,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
@@ -32,6 +33,18 @@ int main(int, char**)
ss >> i;
assert(i == 456);
}
+ {
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss0(" 123 456");
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wistringstream ss0(L" 123 456");
@@ -45,6 +58,20 @@ int main(int, char**)
ss >> i;
assert(i == 456);
}
+ {
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss0(
+ L" 123 456");
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string-alloc.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string-alloc.mode.pass.cpp
index 4fc3bfb..7e78466 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string-alloc.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string-alloc.mode.pass.cpp
@@ -22,28 +22,31 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
+template <class CharT, class Allocator>
static void test() {
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_istringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s);
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, Allocator> ss(s);
assert(ss.view() == SV("testing"));
}
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_istringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::binary);
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::binary);
assert(ss.view() == SV("testing"));
}
}
int main(int, char**) {
- test<char>();
+ test<char, test_allocator<char>>();
+ test<char, operator_hijacker_allocator<char>>();
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t, test_allocator<wchar_t>>();
+ test<wchar_t, operator_hijacker_allocator<wchar_t>>();
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.alloc.pass.cpp
index ca9a6d1..4e4ac35 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.alloc.pass.cpp
@@ -23,23 +23,25 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_istringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, a);
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.mode.alloc.pass.cpp
index 109d037..5a88b8f 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.mode.alloc.pass.cpp
@@ -22,23 +22,25 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_istringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::binary, a);
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::binary, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.move.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.move.mode.pass.cpp
index 57208a41..c7f7cac 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.move.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.move.mode.pass.cpp
@@ -20,6 +20,7 @@
#include "make_string.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
@@ -31,10 +32,23 @@ static void test() {
assert(ss.str() == STR("testing"));
}
{
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> s(STR("testing"));
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> ss(std::move(s));
+ assert((ss.str() ==
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>>(STR("testing"))));
+ }
+ {
std::basic_string<CharT> s(STR("testing"));
const std::basic_istringstream<CharT> ss(std::move(s), std::ios_base::binary);
assert(ss.str() == STR("testing"));
}
+ {
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> s(STR("testing"));
+ const std::basic_istringstream<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> ss(
+ std::move(s), std::ios_base::binary);
+ assert((ss.str() ==
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>>(STR("testing"))));
+ }
}
int main(int, char**) {
diff --git a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.pass.cpp b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.pass.cpp
index 9b0593f..4a5965e7 100644
--- a/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/istringstream/istringstream.cons/string.pass.cpp
@@ -18,12 +18,13 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::istringstream ss(" 123 456");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456");
int i = 0;
@@ -33,8 +34,19 @@ int main(int, char**)
assert(i == 456);
}
{
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(" 123 456");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
+ {
std::istringstream ss(" 123 456", std::ios_base::out);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456");
int i = 0;
@@ -43,10 +55,22 @@ int main(int, char**)
ss >> i;
assert(i == 456);
}
+ {
+ std::basic_istringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(
+ " 123 456", std::ios_base::out);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wistringstream ss(L" 123 456");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456");
int i = 0;
@@ -56,8 +80,20 @@ int main(int, char**)
assert(i == 456);
}
{
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ L" 123 456");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
+ {
std::wistringstream ss(L" 123 456", std::ios_base::out);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456");
int i = 0;
@@ -66,6 +102,18 @@ int main(int, char**)
ss >> i;
assert(i == 456);
}
+ {
+ std::basic_istringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ L" 123 456", std::ios_base::out);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/default.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/default.pass.cpp
index 92796bb..a6b98a4 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/default.pass.cpp
@@ -18,6 +18,7 @@
#include <sstream>
#include <cassert>
+#include "operator_hijacker.h"
#include "test_macros.h"
#if TEST_STD_VER >= 11
#include "test_convertible.h"
@@ -33,29 +34,54 @@ int main(int, char**)
{
{
std::ostringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
{
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss;
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
+ {
std::ostringstream ss(std::ios_base::out);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
+ {
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::ios_base::out);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wostringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
{
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss;
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L"");
+ }
+ {
std::wostringstream ss(std::ios_base::out);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
+ {
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ std::ios_base::out);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L"");
+ }
#endif // TEST_HAS_NO_WIDE_CHARACTERS
#if TEST_STD_VER >= 11
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/mode.alloc.pass.cpp
index 6b6dc8f2..9e89f66 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/mode.alloc.pass.cpp
@@ -20,19 +20,22 @@
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
-template <class CharT>
-static void test() {
- const test_allocator<CharT> a(2);
- const std::basic_ostringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(std::ios_base::binary, a);
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(std::ios_base::binary, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view().empty());
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
+
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/move.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/move.pass.cpp
index 6a63cd6..596a3e7 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/move.pass.cpp
@@ -16,6 +16,7 @@
#include <sstream>
#include <cassert>
+#include "operator_hijacker.h"
#include "test_macros.h"
int main(int, char**)
@@ -23,24 +24,46 @@ int main(int, char**)
{
std::ostringstream ss0(" 123 456");
std::ostringstream ss(std::move(ss0));
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456");
int i = 234;
ss << i << ' ' << 567;
assert(ss.str() == "234 5676");
}
+ {
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss0(" 123 456");
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == "234 5676");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wostringstream ss0(L" 123 456");
std::wostringstream ss(std::move(ss0));
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456");
int i = 234;
ss << i << ' ' << 567;
assert(ss.str() == L"234 5676");
}
+ {
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss0(
+ L" 123 456");
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == L"234 5676");
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string-alloc.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string-alloc.mode.pass.cpp
index 36b615f..ca24c40 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string-alloc.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string-alloc.mode.pass.cpp
@@ -22,28 +22,31 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
+template <class CharT, class Allocator>
static void test() {
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_ostringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s);
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(s);
assert(ss.view() == SV("testing"));
}
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_ostringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::binary);
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::binary);
assert(ss.view() == SV("testing"));
}
}
int main(int, char**) {
- test<char>();
+ test<char, test_allocator<char>>();
+ test<char, operator_hijacker_allocator<char>>();
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t, test_allocator<wchar_t>>();
+ test<wchar_t, operator_hijacker_allocator<wchar_t>>();
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.alloc.pass.cpp
index 1425988..2a0e9d7 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.alloc.pass.cpp
@@ -23,23 +23,25 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_ostringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, a);
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.mode.alloc.pass.cpp
index 18365e6..83b3709 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.mode.alloc.pass.cpp
@@ -22,23 +22,25 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
- const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_ostringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::binary, a);
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
+ const std::basic_string<CharT, std::char_traits<CharT>, Allocator> s(STR("testing"));
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::binary, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.move.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.move.mode.pass.cpp
index e24ac5c..3466429 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.move.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.move.mode.pass.cpp
@@ -19,28 +19,35 @@
#include <cassert>
#include "make_string.h"
+#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
+#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
+template <class CharT, class Allocator>
static void test() {
{
- std::basic_string<CharT> s(STR("testing"));
- const std::basic_ostringstream<CharT> ss(std::move(s));
- assert(ss.str() == STR("testing"));
+ std::basic_string<CharT, std::char_traits<CharT>, Allocator> s(STR("testing"));
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(std::move(s));
+ assert(ss.str() == SV("testing"));
}
{
- std::basic_string<CharT> s(STR("testing"));
- const std::basic_ostringstream<CharT> ss(std::move(s), std::ios_base::binary);
- assert(ss.str() == STR("testing"));
+ std::basic_string<CharT, std::char_traits<CharT>, Allocator> s(STR("testing"));
+ const std::basic_ostringstream<CharT, std::char_traits<CharT>, Allocator> ss(std::move(s), std::ios_base::binary);
+ assert(ss.str() == SV("testing"));
}
}
int main(int, char**) {
- test<char>();
+ test<char, std::allocator<char>>();
+ test<char, test_allocator<char>>();
+ test<char, operator_hijacker_allocator<char>>();
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t, std::allocator<wchar_t>>();
+ test<wchar_t, test_allocator<wchar_t>>();
+ test<wchar_t, operator_hijacker_allocator<wchar_t>>();
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.pass.cpp b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.pass.cpp
index af3c320..9e9405a 100644
--- a/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/ostringstream/ostringstream.cons/string.pass.cpp
@@ -18,12 +18,13 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::ostringstream ss(" 123 456");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456");
int i = 234;
@@ -31,18 +32,37 @@ int main(int, char**)
assert(ss.str() == "234 5676");
}
{
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(" 123 456");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == "234 5676");
+ }
+ {
std::ostringstream ss(" 123 456", std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456");
int i = 234;
ss << i << ' ' << 567;
assert(ss.str() == "234 5676");
}
+ {
+ std::basic_ostringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(
+ " 123 456", std::ios_base::in);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == "234 5676");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wostringstream ss(L" 123 456");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456");
int i = 234;
@@ -50,14 +70,34 @@ int main(int, char**)
assert(ss.str() == L"234 5676");
}
{
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ L" 123 456");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == L"234 5676");
+ }
+ {
std::wostringstream ss(L" 123 456", std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456");
int i = 234;
ss << i << ' ' << 567;
assert(ss.str() == L"234 5676");
}
+ {
+ std::basic_ostringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ L" 123 456", std::ios_base::in);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456");
+ int i = 234;
+ ss << i << ' ' << 567;
+ assert(ss.str() == L"234 5676");
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/default.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/default.pass.cpp
index 5ce17f5c..4f9e7e0 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/default.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/default.pass.cpp
@@ -19,6 +19,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
#if TEST_STD_VER >= 11
#include "test_convertible.h"
@@ -33,26 +34,38 @@ int main(int, char**)
{
{
std::stringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
{
+ std::basic_stringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss;
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
+ {
std::stringstream ss(std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == "");
}
+ {
+ std::basic_stringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::ios_base::in);
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == "");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wstringstream ss;
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
{
std::wstringstream ss(std::ios_base::in);
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L"");
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/mode.alloc.pass.cpp
index 72e5a7f..ed4c36e 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/mode.alloc.pass.cpp
@@ -15,24 +15,26 @@
// basic_stringstream(ios_base::openmode which, const Allocator& a);
-#include <sstream>
#include <cassert>
+#include <sstream>
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
-template <class CharT>
-static void test() {
- const test_allocator<CharT> a(2);
- const std::basic_stringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(std::ios_base::in, a);
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, Allocator> ss(std::ios_base::in, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view().empty());
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/move.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/move.pass.cpp
index ec19c67..0702d9a 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/move.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/move.pass.cpp
@@ -17,13 +17,14 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
int main(int, char**)
{
{
std::stringstream ss0(" 123 456 ");
std::stringstream ss(std::move(ss0));
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456 ");
int i = 0;
@@ -34,11 +35,25 @@ int main(int, char**)
ss << i << ' ' << 123;
assert(ss.str() == "456 1236 ");
}
+ {
+ std::basic_stringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss0(" 123 456 ");
+ std::basic_stringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456 ");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ ss << i << ' ' << 123;
+ assert(ss.str() == "456 1236 ");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wstringstream ss0(L" 123 456 ");
std::wstringstream ss(std::move(ss0));
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456 ");
int i = 0;
@@ -49,6 +64,22 @@ int main(int, char**)
ss << i << ' ' << 123;
assert(ss.str() == L"456 1236 ");
}
+ {
+ std::basic_stringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss0(
+ L" 123 456 ");
+ std::basic_stringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ std::move(ss0));
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456 ");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ ss << i << ' ' << 123;
+ assert(ss.str() == L"456 1236 ");
+ }
#endif
return 0;
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string-alloc.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string-alloc.mode.pass.cpp
index 19fb32f..f65d7b94 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string-alloc.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string-alloc.mode.pass.cpp
@@ -21,29 +21,32 @@
#include "make_string.h"
#include "test_allocator.h"
+#include "operator_hijacker.h"
#include "test_macros.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
+template <class CharT, class Allocator>
static void test() {
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_stringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s);
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, Allocator> ss(s);
assert(ss.view() == SV("testing"));
}
{
const std::basic_string<CharT> s(STR("testing"));
- const std::basic_stringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::in);
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::in);
assert(ss.view() == SV("testing"));
}
}
int main(int, char**) {
- test<char>();
+ test<char, test_allocator<char>>();
+ test<char, operator_hijacker_allocator<char>>();
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t, test_allocator<wchar_t>>();
+ test<wchar_t, operator_hijacker_allocator<wchar_t>>();
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.alloc.pass.cpp
index 91f34a8..9d3024d 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.alloc.pass.cpp
@@ -22,24 +22,26 @@
#include "make_string.h"
#include "test_allocator.h"
+#include "operator_hijacker.h"
#include "test_macros.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_stringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, a);
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.mode.alloc.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.mode.alloc.pass.cpp
index c1bcccd..362677c 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.mode.alloc.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.mode.alloc.pass.cpp
@@ -22,23 +22,26 @@
#include "make_string.h"
#include "test_allocator.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
#define SV(S) MAKE_STRING_VIEW(CharT, S)
-template <class CharT>
-static void test() {
+template <class CharT, class Allocator>
+static void test(const Allocator& a) {
const std::basic_string<CharT> s(STR("testing"));
- const test_allocator<CharT> a(2);
- const std::basic_stringstream<CharT, std::char_traits<CharT>, test_allocator<CharT>> ss(s, std::ios_base::out, a);
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, Allocator> ss(s, std::ios_base::out, a);
assert(ss.rdbuf()->get_allocator() == a);
assert(ss.view() == SV("testing"));
}
int main(int, char**) {
- test<char>();
+ test<char>(test_allocator<char>(2));
+ test<char>(operator_hijacker_allocator<char>());
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
- test<wchar_t>();
+ test<wchar_t>(test_allocator<wchar_t>(2));
+ test<wchar_t>(operator_hijacker_allocator<wchar_t>());
#endif
+
return 0;
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.move.mode.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.move.mode.pass.cpp
index d0146f0..40528f1 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.move.mode.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.move.mode.pass.cpp
@@ -20,20 +20,33 @@
#include "make_string.h"
#include "test_macros.h"
+#include "operator_hijacker.h"
#define STR(S) MAKE_STRING(CharT, S)
+#define SV(S) MAKE_STRING_VIEW(CharT, S)
template <class CharT>
static void test() {
{
std::basic_string<CharT> s(STR("testing"));
const std::basic_stringstream<CharT> ss(std::move(s));
- assert(ss.str() == STR("testing"));
+ assert(ss.str() == SV("testing"));
+ }
+ {
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> s(STR("testing"));
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> ss(std::move(s));
+ assert(ss.str() == SV("testing"));
}
{
std::basic_string<CharT> s(STR("testing"));
const std::basic_stringstream<CharT> ss(std::move(s), std::ios_base::out);
- assert(ss.str() == STR("testing"));
+ assert(ss.str() == SV("testing"));
+ }
+ {
+ std::basic_string<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> s(STR("testing"));
+ const std::basic_stringstream<CharT, std::char_traits<CharT>, operator_hijacker_allocator<CharT>> ss(
+ std::move(s), std::ios_base::out);
+ assert(ss.str() == SV("testing"));
}
}
diff --git a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.pass.cpp b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.pass.cpp
index 0fb8a02..0888087 100644
--- a/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.pass.cpp
+++ b/libcxx/test/std/input.output/string.streams/stringstream/stringstream.cons/string.pass.cpp
@@ -18,6 +18,7 @@
#include <cassert>
#include "test_macros.h"
+#include "operator_hijacker.h"
template<typename T>
struct NoDefaultAllocator : std::allocator<T>
@@ -33,7 +34,7 @@ int main(int, char**)
{
{
std::stringstream ss(" 123 456 ");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == " 123 456 ");
int i = 0;
@@ -44,10 +45,23 @@ int main(int, char**)
ss << i << ' ' << 123;
assert(ss.str() == "456 1236 ");
}
+ {
+ std::basic_stringstream<char, std::char_traits<char>, operator_hijacker_allocator<char> > ss(" 123 456 ");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == " 123 456 ");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ ss << i << ' ' << 123;
+ assert(ss.str() == "456 1236 ");
+ }
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
std::wstringstream ss(L" 123 456 ");
- assert(ss.rdbuf() != 0);
+ assert(ss.rdbuf() != nullptr);
assert(ss.good());
assert(ss.str() == L" 123 456 ");
int i = 0;
@@ -58,6 +72,20 @@ int main(int, char**)
ss << i << ' ' << 123;
assert(ss.str() == L"456 1236 ");
}
+ {
+ std::basic_stringstream<wchar_t, std::char_traits<wchar_t>, operator_hijacker_allocator<wchar_t> > ss(
+ L" 123 456 ");
+ assert(ss.rdbuf() != nullptr);
+ assert(ss.good());
+ assert(ss.str() == L" 123 456 ");
+ int i = 0;
+ ss >> i;
+ assert(i == 123);
+ ss >> i;
+ assert(i == 456);
+ ss << i << ' ' << 123;
+ assert(ss.str() == L"456 1236 ");
+ }
#endif
{ // This is https://llvm.org/PR33727
typedef std::basic_string <char, std::char_traits<char>, NoDefaultAllocator<char> > S;
diff --git a/libcxx/test/std/numerics/c.math/hermite.pass.cpp b/libcxx/test/std/numerics/c.math/hermite.pass.cpp
index 08fbd5c..4283cbf 100644
--- a/libcxx/test/std/numerics/c.math/hermite.pass.cpp
+++ b/libcxx/test/std/numerics/c.math/hermite.pass.cpp
@@ -26,7 +26,16 @@
#include "type_algorithms.h"
-inline constexpr unsigned g_max_n = 128;
+template <class Real>
+constexpr unsigned get_maximal_order() {
+ if constexpr (std::numeric_limits<Real>::is_iec559)
+ return 128;
+ else { // Workaround for z/OS HexFloat.
+ // Note |H_n(x)| < 10^75 for n < 39 and x in sample_points().
+ static_assert(std::numeric_limits<Real>::max_exponent10 == 75);
+ return 39;
+ }
+}
template <class T>
std::array<T, 11> sample_points() {
@@ -203,16 +212,21 @@ std::vector<T> get_roots(unsigned n) {
template <class Real>
void test() {
- { // checks if NaNs are reported correctly (i.e. output == input for input == NaN)
+ if constexpr (
+ std::numeric_limits<Real>::has_quiet_NaN &&
+ std::numeric_limits<
+ Real>::has_signaling_NaN) { // checks if NaNs are reported correctly (i.e. output == input for input == NaN)
using nl = std::numeric_limits<Real>;
for (Real NaN : {nl::quiet_NaN(), nl::signaling_NaN()})
- for (unsigned n = 0; n < g_max_n; ++n)
+ for (unsigned n = 0; n < get_maximal_order<Real>(); ++n)
assert(std::isnan(std::hermite(n, NaN)));
}
- { // simple sample points for n=0..127 should not produce NaNs.
+ if constexpr (std::numeric_limits<Real>::has_quiet_NaN &&
+ std::numeric_limits<
+ Real>::has_signaling_NaN) { // simple sample points for n=0..127 should not produce NaNs.
for (Real x : sample_points<Real>())
- for (unsigned n = 0; n < g_max_n; ++n)
+ for (unsigned n = 0; n < get_maximal_order<Real>(); ++n)
assert(!std::isnan(std::hermite(n, x)));
}
@@ -237,21 +251,21 @@ void test() {
{ // checks std::hermitef for bitwise equality with std::hermite(unsigned, float)
if constexpr (std::is_same_v<Real, float>)
- for (unsigned n = 0; n < g_max_n; ++n)
+ for (unsigned n = 0; n < get_maximal_order<Real>(); ++n)
for (float x : sample_points<float>())
assert(std::hermite(n, x) == std::hermitef(n, x));
}
{ // checks std::hermitel for bitwise equality with std::hermite(unsigned, long double)
if constexpr (std::is_same_v<Real, long double>)
- for (unsigned n = 0; n < g_max_n; ++n)
+ for (unsigned n = 0; n < get_maximal_order<Real>(); ++n)
for (long double x : sample_points<long double>())
assert(std::hermite(n, x) == std::hermitel(n, x));
}
{ // Checks if the characteristic recurrence relation holds: H_{n+1}(x) = 2x H_n(x) - 2n H_{n-1}(x)
for (Real x : sample_points<Real>()) {
- for (unsigned n = 1; n < g_max_n - 1; ++n) {
+ for (unsigned n = 1; n < get_maximal_order<Real>() - 1; ++n) {
Real H_next = std::hermite(n + 1, x);
Real H_next_recurrence = 2 * (x * std::hermite(n, x) - n * std::hermite(n - 1, x));
@@ -289,22 +303,23 @@ void test() {
}
}
- { // check input infinity is handled correctly
+ if constexpr (std::numeric_limits<Real>::has_infinity) { // check input infinity is handled correctly
Real inf = std::numeric_limits<Real>::infinity();
- for (unsigned n = 1; n < g_max_n; ++n) {
+ for (unsigned n = 1; n < get_maximal_order<Real>(); ++n) {
assert(std::hermite(n, +inf) == inf);
assert(std::hermite(n, -inf) == ((n & 1) ? -inf : inf));
}
}
- { // check: if overflow occurs that it is mapped to the correct infinity
+ if constexpr (std::numeric_limits<
+ Real>::has_infinity) { // check: if overflow occurs that it is mapped to the correct infinity
if constexpr (std::is_same_v<Real, double>) {
// Q: Why only double?
// A: The numeric values (e.g. overflow threshold `n`) below are different for other types.
static_assert(sizeof(double) == 8);
- for (unsigned n = 0; n < g_max_n; ++n) {
+ for (unsigned n = 0; n < get_maximal_order<Real>(); ++n) {
// Q: Why n=111 and x=300?
- // A: Both are chosen s.t. the first overlow occurs for some `n<g_max_n`.
+        // A: Both are chosen s.t. the first overflow occurs for some `n<get_maximal_order<Real>()`.
if (n < 111) {
assert(std::isfinite(std::hermite(n, +300.0)));
assert(std::isfinite(std::hermite(n, -300.0)));
@@ -329,7 +344,7 @@ struct TestInt {
template <class Integer>
void operator()() {
// checks that std::hermite(unsigned, Integer) actually wraps std::hermite(unsigned, double)
- for (unsigned n = 0; n < g_max_n; ++n)
+ for (unsigned n = 0; n < get_maximal_order<double>(); ++n)
for (Integer x : {-42, -7, -5, -1, 0, 1, 5, 7, 42})
assert(std::hermite(n, x) == std::hermite(n, static_cast<double>(x)));
}
diff --git a/libcxx/test/std/strings/basic.string/string.cons/move_alloc.pass.cpp b/libcxx/test/std/strings/basic.string/string.cons/move_alloc.pass.cpp
index 7210d67..93a8e59 100644
--- a/libcxx/test/std/strings/basic.string/string.cons/move_alloc.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.cons/move_alloc.pass.cpp
@@ -19,6 +19,7 @@
#include "test_allocator.h"
#include "min_allocator.h"
#include "asan_testing.h"
+#include "operator_hijacker.h"
template <class S>
TEST_CONSTEXPR_CXX20 void test(S s0, const typename S::allocator_type& a) {
@@ -74,6 +75,18 @@ TEST_CONSTEXPR_CXX20 bool test() {
test(S("1"), A());
test(S("1234567890123456789012345678901234567890123456789012345678901234567890"), A());
}
+ {
+ typedef operator_hijacker_allocator<char> A;
+ typedef std::basic_string<char, std::char_traits<char>, A> S;
+#if TEST_STD_VER > 14
+ static_assert((noexcept(S{})), "");
+#elif TEST_STD_VER >= 11
+ static_assert((noexcept(S()) == std::is_nothrow_move_constructible<A>::value), "");
+#endif
+ test(S(), A());
+ test(S("1"), A());
+ test(S("1234567890123456789012345678901234567890123456789012345678901234567890"), A());
+ }
return true;
}
diff --git a/libcxx/test/std/strings/basic.string/string.cons/substr_rvalue.pass.cpp b/libcxx/test/std/strings/basic.string/string.cons/substr_rvalue.pass.cpp
index a9b07a6..9c7c341 100644
--- a/libcxx/test/std/strings/basic.string/string.cons/substr_rvalue.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.cons/substr_rvalue.pass.cpp
@@ -24,6 +24,7 @@
#include "test_allocator.h"
#include "test_macros.h"
#include "asan_testing.h"
+#include "operator_hijacker.h"
#define STR(string) MAKE_CSTRING(typename S::value_type, string)
@@ -212,6 +213,8 @@ constexpr void test_allocators() {
test_string<std::basic_string<CharT, CharTraits, std::allocator<CharT>>>(std::allocator<CharT>{});
test_string<std::basic_string<CharT, CharTraits, min_allocator<CharT>>>(min_allocator<CharT>{});
test_string<std::basic_string<CharT, CharTraits, test_allocator<CharT>>>(test_allocator<CharT>{42});
+ test_string<std::basic_string<CharT, CharTraits, operator_hijacker_allocator<CharT>>>(
+ operator_hijacker_allocator<CharT>{});
}
template <class CharT>
diff --git a/libcxx/test/std/strings/basic.string/string.modifiers/string_swap/swap.pass.cpp b/libcxx/test/std/strings/basic.string/string.modifiers/string_swap/swap.pass.cpp
index 16bdac1..9af110a 100644
--- a/libcxx/test/std/strings/basic.string/string.modifiers/string_swap/swap.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.modifiers/string_swap/swap.pass.cpp
@@ -18,6 +18,7 @@
#include "test_macros.h"
#include "min_allocator.h"
#include "asan_testing.h"
+#include "operator_hijacker.h"
template <class S>
TEST_CONSTEXPR_CXX20 void test(S s1, S s2) {
@@ -62,6 +63,7 @@ TEST_CONSTEXPR_CXX20 bool test() {
#if TEST_STD_VER >= 11
test_string<std::basic_string<char, std::char_traits<char>, min_allocator<char>>>();
test_string<std::basic_string<char, std::char_traits<char>, safe_allocator<char>>>();
+ test_string<std::basic_string<char, std::char_traits<char>, operator_hijacker_allocator<char>>>();
#endif
return true;
diff --git a/libcxx/test/std/time/time.cal/time.cal.ymdlast/time.cal.ymdlast.nonmembers/comparisons.pass.cpp b/libcxx/test/std/time/time.cal/time.cal.ymdlast/time.cal.ymdlast.nonmembers/comparisons.pass.cpp
index e28b6d8..5d2aa48 100644
--- a/libcxx/test/std/time/time.cal/time.cal.ymdlast/time.cal.ymdlast.nonmembers/comparisons.pass.cpp
+++ b/libcxx/test/std/time/time.cal/time.cal.ymdlast/time.cal.ymdlast.nonmembers/comparisons.pass.cpp
@@ -59,8 +59,8 @@ constexpr bool test() {
: std::strong_ordering::greater)));
// same month, different years
- for (int i = 1000; i < 20; ++i)
- for (int j = 1000; j < 20; ++j)
+ for (int i = 1000; i < 1010; ++i)
+ for (int j = 1000; j < 1010; ++j)
assert((testOrder(year_month_day_last{year{i}, month_day_last{January}},
year_month_day_last{year{j}, month_day_last{January}},
i == j ? std::strong_ordering::equal
diff --git a/libcxx/test/support/operator_hijacker.h b/libcxx/test/support/operator_hijacker.h
index a2569da..15add46 100644
--- a/libcxx/test/support/operator_hijacker.h
+++ b/libcxx/test/support/operator_hijacker.h
@@ -10,7 +10,9 @@
#define SUPPORT_OPERATOR_HIJACKER_H
#include <cstddef>
+#include <memory>
#include <functional>
+#include <string>
#include "test_macros.h"
@@ -18,8 +20,8 @@
///
/// The class has some additional operations to be usable in all containers.
struct operator_hijacker {
- bool operator<(const operator_hijacker&) const { return true; }
- bool operator==(const operator_hijacker&) const { return true; }
+ TEST_CONSTEXPR bool operator<(const operator_hijacker&) const { return true; }
+ TEST_CONSTEXPR bool operator==(const operator_hijacker&) const { return true; }
template <typename T>
friend void operator&(T&&) = delete;
@@ -43,4 +45,16 @@ struct std::hash<operator_hijacker> {
std::size_t operator()(const operator_hijacker&) const { return 0; }
};
+template <class T>
+struct operator_hijacker_allocator : std::allocator<T>, operator_hijacker {
+#if TEST_STD_VER <= 17
+ struct rebind {
+ typedef operator_hijacker_allocator<T> other;
+ };
+#endif
+};
+
+template <class CharT>
+struct operator_hijacker_char_traits : std::char_traits<CharT>, operator_hijacker {};
+
#endif // SUPPORT_OPERATOR_HIJACKER_H
diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 3e0efe5..07a7535 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -228,10 +228,16 @@ static void writePltHeaderLong(uint8_t *buf) {
write32(buf + 16, gotPlt - l1 - 8);
}
+// True if we should use Thumb PLTs, which currently require Thumb2, and are
+// only used if the target does not have the ARM ISA.
+static bool useThumbPLTs() {
+ return config->armHasThumb2ISA && !config->armHasArmISA;
+}
+
// The default PLT header requires the .got.plt to be within 128 Mb of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
- if (config->armThumbPLTs) {
+ if (useThumbPLTs()) {
// The instruction sequence for thumb:
//
// 0: b500 push {lr}
@@ -289,7 +295,7 @@ void ARM::writePltHeader(uint8_t *buf) const {
}
void ARM::addPltHeaderSymbols(InputSection &isec) const {
- if (config->armThumbPLTs) {
+ if (useThumbPLTs()) {
addSyntheticLocal("$t", STT_NOTYPE, 0, 0, isec);
addSyntheticLocal("$d", STT_NOTYPE, 12, 0, isec);
} else {
@@ -315,7 +321,7 @@ static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- if (!config->armThumbPLTs) {
+ if (!useThumbPLTs()) {
uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
// The PLT entry is similar to the example given in Appendix A of ELF for
@@ -367,7 +373,7 @@ void ARM::writePlt(uint8_t *buf, const Symbol &sym,
}
void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
- if (config->armThumbPLTs) {
+ if (useThumbPLTs()) {
addSyntheticLocal("$t", STT_NOTYPE, off, 0, isec);
} else {
addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
@@ -393,7 +399,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
case R_ARM_JUMP24:
// Source is ARM, all PLT entries are ARM so no interworking required.
// Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb).
- assert(!config->armThumbPLTs &&
+ assert(!useThumbPLTs() &&
"If the source is ARM, we should not need Thumb PLTs");
if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
return true;
@@ -407,7 +413,8 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
case R_ARM_THM_JUMP24:
// Source is Thumb, when all PLT entries are ARM interworking is required.
// Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM).
- if ((expr == R_PLT_PC && !config->armThumbPLTs) || (s.isFunc() && (s.getVA() & 1) == 0))
+ if ((expr == R_PLT_PC && !useThumbPLTs()) ||
+ (s.isFunc() && (s.getVA() & 1) == 0))
return true;
[[fallthrough]];
case R_ARM_THM_CALL: {
@@ -675,7 +682,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
// PLT entries are always ARM state so we know we need to interwork.
assert(rel.sym); // R_ARM_THM_CALL is always reached via relocate().
bool bit0Thumb = val & 1;
- bool useThumb = bit0Thumb || config->armThumbPLTs;
+ bool useThumb = bit0Thumb || useThumbPLTs();
bool isBlx = (read16(loc + 2) & 0x1000) == 0;
// lld 10.0 and before always used bit0Thumb when deciding to write a BLX
// even when type not STT_FUNC.
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index a7243cf..0ddac5f 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -221,7 +221,8 @@ struct Config {
bool allowMultipleDefinition;
bool fatLTOObjects;
bool androidPackDynRelocs = false;
- bool armThumbPLTs = false;
+ bool armHasArmISA = false;
+ bool armHasThumb2ISA = false;
bool armHasBlx = false;
bool armHasMovtMovw = false;
bool armJ1J2BranchEncoding = false;
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 0e4ba06..7adc35f 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -200,10 +200,8 @@ static void updateSupportedARMFeatures(const ARMAttributeParser &attributes) {
attributes.getAttributeValue(ARMBuildAttrs::ARM_ISA_use);
std::optional<unsigned> thumb =
attributes.getAttributeValue(ARMBuildAttrs::THUMB_ISA_use);
- bool noArmISA = !armISA || *armISA == ARMBuildAttrs::Not_Allowed;
- bool hasThumb2 = thumb && *thumb >= ARMBuildAttrs::AllowThumb32;
- if (noArmISA && hasThumb2)
- config->armThumbPLTs = true;
+ config->armHasArmISA |= armISA && *armISA >= ARMBuildAttrs::Allowed;
+ config->armHasThumb2ISA |= thumb && *thumb >= ARMBuildAttrs::AllowThumb32;
}
InputFile::InputFile(Kind k, MemoryBufferRef m)
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 5bee84a..4c0b4df 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -632,6 +632,7 @@ static bool isRelroSection(const OutputSection *sec) {
enum RankFlags {
RF_NOT_ADDR_SET = 1 << 27,
RF_NOT_ALLOC = 1 << 26,
+ RF_HIP_FATBIN = 1 << 19,
RF_PARTITION = 1 << 18, // Partition number (8 bits)
RF_LARGE_ALT = 1 << 15,
RF_WRITE = 1 << 14,
@@ -729,6 +730,15 @@ unsigned elf::getSectionRank(OutputSection &osec) {
if (osec.type == SHT_NOBITS)
rank |= RF_BSS;
+  // Put HIP fatbin-related sections further away to avoid wasting relocation
+ // range to jump over them. Make sure .hip_fatbin is the furthest.
+ if (osec.name == ".hipFatBinSegment")
+ rank |= RF_HIP_FATBIN;
+ if (osec.name == ".hip_gpubin_handle")
+ rank |= RF_HIP_FATBIN | 2;
+ if (osec.name == ".hip_fatbin")
+ rank |= RF_HIP_FATBIN | RF_WRITE | 3;
+
// Some architectures have additional ordering restrictions for sections
// within the same PT_LOAD.
if (config->emachine == EM_PPC64) {
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index f3d2a93..e5cba4d 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -1042,20 +1042,36 @@ static bool shouldAdhocSignByDefault(Architecture arch, PlatformType platform) {
platform == PLATFORM_XROS_SIMULATOR;
}
-static bool dataConstDefault(const InputArgList &args) {
- static const std::array<std::pair<PlatformType, VersionTuple>, 6> minVersion =
- {{{PLATFORM_MACOS, VersionTuple(10, 15)},
- {PLATFORM_IOS, VersionTuple(13, 0)},
- {PLATFORM_TVOS, VersionTuple(13, 0)},
- {PLATFORM_WATCHOS, VersionTuple(6, 0)},
- {PLATFORM_XROS, VersionTuple(1, 0)},
- {PLATFORM_BRIDGEOS, VersionTuple(4, 0)}}};
- PlatformType platform = removeSimulator(config->platformInfo.target.Platform);
- auto it = llvm::find_if(minVersion,
+template <unsigned long N>
+using MinVersions = std::array<std::pair<PlatformType, VersionTuple>, N>;
+
+/// Returns true if the target's minimum deployment version is at least the
+/// min version listed for its platform; false if the platform is not listed.
+template <unsigned long N>
+static bool greaterEqMinVersion(const MinVersions<N> &minVersions,
+ bool ignoreSimulator) {
+ PlatformType platform = config->platformInfo.target.Platform;
+ if (ignoreSimulator)
+ platform = removeSimulator(platform);
+ auto it = llvm::find_if(minVersions,
[&](const auto &p) { return p.first == platform; });
- if (it != minVersion.end())
- if (config->platformInfo.target.MinDeployment < it->second)
- return false;
+ if (it != minVersions.end())
+ if (config->platformInfo.target.MinDeployment >= it->second)
+ return true;
+ return false;
+}
+
+static bool dataConstDefault(const InputArgList &args) {
+ static const MinVersions<6> minVersion = {{
+ {PLATFORM_MACOS, VersionTuple(10, 15)},
+ {PLATFORM_IOS, VersionTuple(13, 0)},
+ {PLATFORM_TVOS, VersionTuple(13, 0)},
+ {PLATFORM_WATCHOS, VersionTuple(6, 0)},
+ {PLATFORM_XROS, VersionTuple(1, 0)},
+ {PLATFORM_BRIDGEOS, VersionTuple(4, 0)},
+ }};
+ if (!greaterEqMinVersion(minVersion, true))
+ return false;
switch (config->outputType) {
case MH_EXECUTE:
@@ -1106,30 +1122,18 @@ static bool shouldEmitChainedFixups(const InputArgList &args) {
if (requested)
return true;
- static const std::array<std::pair<PlatformType, VersionTuple>, 9> minVersion =
- {{
- {PLATFORM_IOS, VersionTuple(13, 4)},
- {PLATFORM_IOSSIMULATOR, VersionTuple(16, 0)},
- {PLATFORM_MACOS, VersionTuple(13, 0)},
- {PLATFORM_TVOS, VersionTuple(14, 0)},
- {PLATFORM_TVOSSIMULATOR, VersionTuple(15, 0)},
- {PLATFORM_WATCHOS, VersionTuple(7, 0)},
- {PLATFORM_WATCHOSSIMULATOR, VersionTuple(8, 0)},
- {PLATFORM_XROS, VersionTuple(1, 0)},
- {PLATFORM_XROS_SIMULATOR, VersionTuple(1, 0)},
- }};
- PlatformType platform = config->platformInfo.target.Platform;
- auto it = llvm::find_if(minVersion,
- [&](const auto &p) { return p.first == platform; });
-
- // We don't know the versions for other platforms, so default to disabled.
- if (it == minVersion.end())
- return false;
-
- if (it->second > config->platformInfo.target.MinDeployment)
- return false;
-
- return true;
+ static const MinVersions<9> minVersion = {{
+ {PLATFORM_IOS, VersionTuple(13, 4)},
+ {PLATFORM_IOSSIMULATOR, VersionTuple(16, 0)},
+ {PLATFORM_MACOS, VersionTuple(13, 0)},
+ {PLATFORM_TVOS, VersionTuple(14, 0)},
+ {PLATFORM_TVOSSIMULATOR, VersionTuple(15, 0)},
+ {PLATFORM_WATCHOS, VersionTuple(7, 0)},
+ {PLATFORM_WATCHOSSIMULATOR, VersionTuple(8, 0)},
+ {PLATFORM_XROS, VersionTuple(1, 0)},
+ {PLATFORM_XROS_SIMULATOR, VersionTuple(1, 0)},
+ }};
+ return greaterEqMinVersion(minVersion, false);
}
static bool shouldEmitRelativeMethodLists(const InputArgList &args) {
@@ -1140,12 +1144,20 @@ static bool shouldEmitRelativeMethodLists(const InputArgList &args) {
if (arg && arg->getOption().getID() == OPT_no_objc_relative_method_lists)
return false;
- // TODO: If no flag is specified, don't default to false, but instead:
- // - default false on < ios14
- // - default true on >= ios14
- // For now, until this feature is confirmed stable, default to false if no
- // flag is explicitly specified
- return false;
+ // If no flag is specified, enable this on newer versions by default.
+  // The min versions are taken from
+ // ld64(https://github.com/apple-oss-distributions/ld64/blob/47f477cb721755419018f7530038b272e9d0cdea/src/ld/ld.hpp#L310)
+  // to mimic the operation of ld64
+ // [here](https://github.com/apple-oss-distributions/ld64/blob/47f477cb721755419018f7530038b272e9d0cdea/src/ld/Options.cpp#L6085-L6101)
+ static const MinVersions<6> minVersion = {{
+ {PLATFORM_MACOS, VersionTuple(10, 16)},
+ {PLATFORM_IOS, VersionTuple(14, 0)},
+ {PLATFORM_WATCHOS, VersionTuple(7, 0)},
+ {PLATFORM_TVOS, VersionTuple(14, 0)},
+ {PLATFORM_BRIDGEOS, VersionTuple(5, 0)},
+ {PLATFORM_XROS, VersionTuple(1, 0)},
+ }};
+ return greaterEqMinVersion(minVersion, true);
}
void SymbolPatterns::clear() {
diff --git a/lld/cmake/modules/AddLLD.cmake b/lld/cmake/modules/AddLLD.cmake
index 9f2684b..34f9974 100644
--- a/lld/cmake/modules/AddLLD.cmake
+++ b/lld/cmake/modules/AddLLD.cmake
@@ -44,7 +44,7 @@ macro(add_lld_tool name)
AND (NOT LLVM_DISTRIBUTION_COMPONENTS OR ${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS)
)
set(get_obj_args ${ARGN})
- list(FILTER get_obj_args EXCLUDE REGEX "^SUPPORT_PLUGINS$")
+ list(FILTER get_obj_args EXCLUDE REGEX "^(SUPPORT_PLUGINS|EXPORT_SYMBOLS_FOR_PLUGINS)$")
generate_llvm_objects(${name} ${get_obj_args})
add_custom_target(${name} DEPENDS llvm-driver)
else()
diff --git a/lld/test/ELF/arm-mixed-plts.s b/lld/test/ELF/arm-mixed-plts.s
new file mode 100644
index 0000000..801de70
--- /dev/null
+++ b/lld/test/ELF/arm-mixed-plts.s
@@ -0,0 +1,44 @@
+# REQUIRES: arm
+
+# RUN: rm -rf %t && split-file %s %t
+# RUN: llvm-mc -filetype=obj -arm-add-build-attributes -triple=armv7a-none-linux-gnueabi %t/a.s -o %t1.o
+# RUN: llvm-mc -filetype=obj -arm-add-build-attributes -triple=armv7a-none-linux-gnueabi %t/b.s -o %t2.o
+# RUN: ld.lld -shared %t1.o %t2.o -o %t.so
+# RUN: llvm-objdump -d %t.so | FileCheck %s
+
+## Check that, when the input is a mixture of objects which can and cannot use
+## the ARM ISA, we use the default ARM PLT sequences.
+
+# CHECK: <.plt>:
+# CHECK-NEXT: e52de004 str lr, [sp, #-0x4]!
+# CHECK-NEXT: e28fe600 add lr, pc, #0, #12
+# CHECK-NEXT: e28eea20 add lr, lr, #32, #20
+# CHECK-NEXT: e5bef084 ldr pc, [lr, #0x84]!
+# CHECK-NEXT: d4 d4 d4 d4 .word 0xd4d4d4d4
+# CHECK-NEXT: d4 d4 d4 d4 .word 0xd4d4d4d4
+# CHECK-NEXT: d4 d4 d4 d4 .word 0xd4d4d4d4
+# CHECK-NEXT: d4 d4 d4 d4 .word 0xd4d4d4d4
+# CHECK-NEXT: e28fc600 add r12, pc, #0, #12
+# CHECK-NEXT: e28cca20 add r12, r12, #32, #20
+# CHECK-NEXT: e5bcf06c ldr pc, [r12, #0x6c]!
+# CHECK-NEXT: d4 d4 d4 d4 .word 0xd4d4d4d4
+
+#--- a.s
+ .globl foo
+ .type foo, %function
+ .globl bar
+ .type bar, %function
+
+ .thumb
+foo:
+ bl bar
+ bx lr
+
+#--- b.s
+ .eabi_attribute Tag_ARM_ISA_use, 0
+
+ .arm
+ .globl bar
+ .type bar, %function
+bar:
+ bx lr
diff --git a/lld/test/ELF/hip-section-layout.s b/lld/test/ELF/hip-section-layout.s
new file mode 100644
index 0000000..c76df50
--- /dev/null
+++ b/lld/test/ELF/hip-section-layout.s
@@ -0,0 +1,39 @@
+# REQUIRES: x86
+## Test HIP specific sections layout.
+
+# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux --defsym=HIP_SECTIONS=1 --defsym=NON_HIP_SECTIONS=1 %s -o %t.o
+# RUN: ld.lld %t.o -o %t.out
+# RUN: llvm-readobj --sections %t.out | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux --defsym=NON_HIP_SECTIONS=1 %s -o %t.1.o
+# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux --defsym=HIP_SECTIONS=1 %s -o %t.2.o
+# RUN: ld.lld %t.1.o %t.2.o -o %t.s.out
+# RUN: llvm-readobj --sections %t.s.out | FileCheck %s
+
+.ifdef HIP_SECTIONS
+.section .hipFatBinSegment,"aw",@progbits; .space 1
+.section .hip_gpubin_handle,"aw",@progbits; .space 1
+.section .hip_fatbin,"a",@progbits; .space 1
+.endif
+
+.ifdef NON_HIP_SECTIONS
+.global _start
+.text
+_start:
+.section .bss,"aw",@nobits; .space 1
+.section .debug_info,"",@progbits
+.section .debug_line,"",@progbits
+.section .debug_str,"MS",@progbits,1
+.endif
+
+# Check that the HIP sections are placed towards the end but before non-allocated sections.
+
+// CHECK: Name: .text
+// CHECK: Name: .bss
+// CHECK: Name: .hipFatBinSegment
+// CHECK: Name: .hip_gpubin_handle
+// CHECK: Name: .hip_fatbin
+// CHECK: Name: .debug_info
+// CHECK: Name: .debug_line
+// CHECK: Name: .debug_str
+
diff --git a/lld/test/MachO/lto-object-path.ll b/lld/test/MachO/lto-object-path.ll
index 93e4e2a..ae2fa11 100644
--- a/lld/test/MachO/lto-object-path.ll
+++ b/lld/test/MachO/lto-object-path.ll
@@ -12,11 +12,11 @@
; RUN: %lld %t/test-obj.o -o %t/test-obj -object_path_lto %t/lto-temps-obj
; RUN: llvm-nm -pa %t/test-obj | FileCheck %s --check-prefixes CHECK,NOLTOFILES -DDIR=%t
-; RUN: ZERO_AR_DATE=0 %lld %t/test.o -o %t/test -object_path_lto %t/lto-temps
+; RUN: env ZERO_AR_DATE=0 %lld %t/test.o -o %t/test -object_path_lto %t/lto-temps
; RUN: llvm-nm -pa %t/test | FileCheck %s --check-prefixes CHECK,OBJPATH-DIR -DDIR=%t/lto-temps
;; Ensure object path is still used if the cache is used
-; RUN: ZERO_AR_DATE=0 %lld %t/test.o -o %t/test -object_path_lto %t/lto-temps -prune_interval_lto 20 -cache_path_lto %t/cache --thinlto-cache-policy=cache_size_files=1:cache_size_bytes=10
+; RUN: env ZERO_AR_DATE=0 %lld %t/test.o -o %t/test -object_path_lto %t/lto-temps -prune_interval_lto 20 -cache_path_lto %t/cache --thinlto-cache-policy=cache_size_files=1:cache_size_bytes=10
; RUN: llvm-nm -pa %t/test | FileCheck %s --check-prefixes CHECK,OBJPATH-DIR -DDIR=%t/lto-temps
;; And that dsymutil can read the result
; RUN: dsymutil -f -o - %t/test | llvm-dwarfdump - | FileCheck %s --check-prefix=DSYM
@@ -33,7 +33,7 @@
;; check that the object path can be an existing file
; RUN: touch %t/lto-tmp.o
-; RUN: ZERO_AR_DATE=0 %lld %t/test-nonthin.o -o %t/test -object_path_lto %t/lto-tmp.o
+; RUN: env ZERO_AR_DATE=0 %lld %t/test-nonthin.o -o %t/test -object_path_lto %t/lto-tmp.o
; RUN: llvm-nm -pa %t/test | FileCheck %s --check-prefixes CHECK,OBJPATH-FILE -DFILE=%t/lto-tmp.o
; RUN: llvm-otool -l %t/lto-tmp.o | FileCheck %s --check-prefixes=MINOS
diff --git a/lld/test/MachO/objc-category-conflicts.s b/lld/test/MachO/objc-category-conflicts.s
index eb4c0ad..e64a9ee 100644
--- a/lld/test/MachO/objc-category-conflicts.s
+++ b/lld/test/MachO/objc-category-conflicts.s
@@ -1,3 +1,6 @@
+# `-no_objc_relative_method_lists` needs to be explicitly added to this test to avoid crashing after `-objc_relative_method_lists` was made default.
+# TODO: Make this test compatible with default `-objc_relative_method_lists` and remove the `-no_objc_relative_method_lists` flag. Issue #101419
+
# REQUIRES: x86
# RUN: rm -rf %t; split-file %s %t
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-macos11.0 -I %t %t/cat1.s -o %t/cat1.o
@@ -10,31 +13,31 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-macos11.0 -I %t %t/cat2.s --defsym MAKE_LOAD_METHOD=1 -o %t/cat2-with-load.o
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-macos11.0 -I %t %t/klass.s --defsym MAKE_LOAD_METHOD=1 -o %t/klass-with-load.o
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-macos11.0 -I %t %t/klass-with-no-rodata.s -o %t/klass-with-no-rodata.o
-# RUN: %lld -dylib -lobjc %t/klass.o -o %t/libklass.dylib
+# RUN: %lld -no_objc_relative_method_lists -dylib -lobjc %t/klass.o -o %t/libklass.dylib
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/klass.o %t/cat1.o %t/cat2.o -o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/klass.o %t/cat1.o %t/cat2.o -o \
# RUN: /dev/null 2>&1 | FileCheck %s --check-prefixes=CATCLS,CATCAT
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/libklass.dylib %t/cat1.o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/libklass.dylib %t/cat1.o \
# RUN: %t/cat2.o -o /dev/null 2>&1 | FileCheck %s --check-prefix=CATCAT
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/klass_w_sym.o %t/cat1_w_sym.o %t/cat2_w_sym.o -o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/klass_w_sym.o %t/cat1_w_sym.o %t/cat2_w_sym.o -o \
# RUN: /dev/null 2>&1 | FileCheck %s --check-prefixes=CATCLS_W_SYM,CATCAT_W_SYM
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/libklass.dylib %t/cat1_w_sym.o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/libklass.dylib %t/cat1_w_sym.o \
# RUN: %t/cat2_w_sym.o -o /dev/null 2>&1 | FileCheck %s --check-prefix=CATCAT_W_SYM
## Check that we don't emit spurious warnings around the +load method while
## still emitting the other warnings. Note that we have made separate
## `*-with-load.s` files for ease of comparison with ld64; ld64 will not warn
## at all if multiple +load methods are present.
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/klass-with-load.o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/klass-with-load.o \
# RUN: %t/cat1-with-load.o %t/cat2-with-load.o -o /dev/null 2>&1 | \
# RUN: FileCheck %s --check-prefixes=CATCLS,CATCAT --implicit-check-not '+load'
## Regression test: Check that we don't crash.
-# RUN: %no-fatal-warnings-lld --check-category-conflicts -dylib -lobjc %t/klass-with-no-rodata.o -o /dev/null
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists --check-category-conflicts -dylib -lobjc %t/klass-with-no-rodata.o -o /dev/null
## Check that we don't emit any warnings without --check-category-conflicts.
-# RUN: %no-fatal-warnings-lld -dylib -lobjc %t/klass.o %t/cat1.o %t/cat2.o -o \
+# RUN: %no-fatal-warnings-lld -no_objc_relative_method_lists -dylib -lobjc %t/klass.o %t/cat1.o %t/cat2.o -o \
# RUN: /dev/null 2>&1 | FileCheck %s --implicit-check-not 'warning' --allow-empty
# CATCLS: warning: method '+s1' has conflicting definitions:
diff --git a/lld/test/MachO/objc-category-merging-complete-test.s b/lld/test/MachO/objc-category-merging-complete-test.s
index cb11207..95b8096 100644
--- a/lld/test/MachO/objc-category-merging-complete-test.s
+++ b/lld/test/MachO/objc-category-merging-complete-test.s
@@ -1,3 +1,6 @@
+# `-no_objc_relative_method_lists` needs to be explicitly added to this test to avoid crashing after `-objc_relative_method_lists` was made default.
+# TODO: Make this test compatible with default `-objc_relative_method_lists` and remove the `-no_objc_relative_method_lists` flag. Issue #101419
+
# REQUIRES: aarch64
# RUN: rm -rf %t; split-file %s %t && cd %t
@@ -7,10 +10,10 @@
# RUN: %lld -arch arm64 a64_file1.o -o a64_file1.dylib -dylib
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o a64_file2.o a64_file2.s
-# RUN: %lld -arch arm64 -o a64_file2_no_merge.exe a64_file1.dylib a64_file2.o
-# RUN: %lld -arch arm64 -o a64_file2_no_merge_v2.exe a64_file1.dylib a64_file2.o -no_objc_category_merging
-# RUN: %lld -arch arm64 -o a64_file2_no_merge_v3.exe a64_file1.dylib a64_file2.o -objc_category_merging -no_objc_category_merging
-# RUN: %lld -arch arm64 -o a64_file2_merge.exe -objc_category_merging a64_file1.dylib a64_file2.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -o a64_file2_no_merge.exe a64_file1.dylib a64_file2.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -o a64_file2_no_merge_v2.exe a64_file1.dylib a64_file2.o -no_objc_category_merging
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -o a64_file2_no_merge_v3.exe a64_file1.dylib a64_file2.o -objc_category_merging -no_objc_category_merging
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -o a64_file2_merge.exe -objc_category_merging a64_file1.dylib a64_file2.o
# RUN: llvm-objdump --objc-meta-data --macho a64_file2_no_merge.exe | FileCheck %s --check-prefixes=NO_MERGE_CATS
# RUN: llvm-objdump --objc-meta-data --macho a64_file2_no_merge_v2.exe | FileCheck %s --check-prefixes=NO_MERGE_CATS
@@ -18,7 +21,7 @@
# RUN: llvm-objdump --objc-meta-data --macho a64_file2_merge.exe | FileCheck %s --check-prefixes=MERGE_CATS
############ Test merging multiple categories into the base class ############
-# RUN: %lld -arch arm64 -o a64_file2_merge_into_class.exe -objc_category_merging a64_file1.o a64_file2.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -o a64_file2_merge_into_class.exe -objc_category_merging a64_file1.o a64_file2.o
# RUN: llvm-objdump --objc-meta-data --macho a64_file2_merge_into_class.exe | FileCheck %s --check-prefixes=MERGE_CATS_CLS
diff --git a/lld/test/MachO/objc-category-merging-erase-objc-name-test.s b/lld/test/MachO/objc-category-merging-erase-objc-name-test.s
index aeb2395..869632d 100644
--- a/lld/test/MachO/objc-category-merging-erase-objc-name-test.s
+++ b/lld/test/MachO/objc-category-merging-erase-objc-name-test.s
@@ -1,3 +1,6 @@
+# `-no_objc_relative_method_lists` needs to be explicitly added to this test to avoid crashing after `-objc_relative_method_lists` was made default.
+# TODO: Make this test compatible with default `-objc_relative_method_lists` and remove the `-no_objc_relative_method_lists` flag. Issue #101419
+
; REQUIRES: aarch64
; Here we test that if we defined a protocol MyTestProtocol and also a category MyTestProtocol
@@ -5,7 +8,7 @@
; delete the 'MyTestProtocol' name
; RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o %T/erase-objc-name.o %s
-; RUN: %lld -arch arm64 -dylib -o %T/erase-objc-name.dylib %T/erase-objc-name.o -objc_category_merging
+; RUN: %lld -no_objc_relative_method_lists -arch arm64 -dylib -o %T/erase-objc-name.dylib %T/erase-objc-name.o -objc_category_merging
; RUN: llvm-objdump --objc-meta-data --macho %T/erase-objc-name.dylib | FileCheck %s --check-prefixes=MERGE_CATS
; === Check merge categories enabled ===
diff --git a/lld/test/MachO/objc-category-merging-minimal.s b/lld/test/MachO/objc-category-merging-minimal.s
index 5274933..5cfe5e0 100644
--- a/lld/test/MachO/objc-category-merging-minimal.s
+++ b/lld/test/MachO/objc-category-merging-minimal.s
@@ -1,3 +1,6 @@
+# `-no_objc_relative_method_lists` needs to be explicitly added to this test to avoid crashing after `-objc_relative_method_lists` was made default.
+# TODO: Make this test compatible with default `-objc_relative_method_lists` and remove the `-no_objc_relative_method_lists` flag. Issue #101419
+
# REQUIRES: aarch64
# RUN: rm -rf %t; split-file %s %t && cd %t
@@ -9,7 +12,7 @@
## Create our main testing dylib - linking against the fake dylib above
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o merge_cat_minimal.o merge_cat_minimal.s
# RUN: %lld -arch arm64 -dylib -o merge_cat_minimal_no_merge.dylib a64_fakedylib.dylib merge_cat_minimal.o
-# RUN: %lld -arch arm64 -dylib -o merge_cat_minimal_merge.dylib -objc_category_merging a64_fakedylib.dylib merge_cat_minimal.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -dylib -o merge_cat_minimal_merge.dylib -objc_category_merging a64_fakedylib.dylib merge_cat_minimal.o
## Now verify that the flag caused category merging to happen appropriatelly
# RUN: llvm-objdump --objc-meta-data --macho merge_cat_minimal_no_merge.dylib | FileCheck %s --check-prefixes=NO_MERGE_CATS
@@ -17,7 +20,7 @@
############ Test merging multiple categories into the base class ############
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o merge_base_class_minimal.o merge_base_class_minimal.s
-# RUN: %lld -arch arm64 -dylib -o merge_base_class_minimal_yes_merge.dylib -objc_category_merging merge_base_class_minimal.o merge_cat_minimal.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -dylib -o merge_base_class_minimal_yes_merge.dylib -objc_category_merging merge_base_class_minimal.o merge_cat_minimal.o
# RUN: %lld -arch arm64 -dylib -o merge_base_class_minimal_no_merge.dylib merge_base_class_minimal.o merge_cat_minimal.o
# RUN: llvm-objdump --objc-meta-data --macho merge_base_class_minimal_no_merge.dylib | FileCheck %s --check-prefixes=NO_MERGE_INTO_BASE
@@ -25,7 +28,7 @@
############ Test merging swift category into the base class ############
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o MyBaseClassSwiftExtension.o MyBaseClassSwiftExtension.s
-# RUN: %lld -arch arm64 -dylib -o merge_base_class_swift_minimal_yes_merge.dylib -objc_category_merging MyBaseClassSwiftExtension.o merge_base_class_minimal.o
+# RUN: %lld -no_objc_relative_method_lists -arch arm64 -dylib -o merge_base_class_swift_minimal_yes_merge.dylib -objc_category_merging MyBaseClassSwiftExtension.o merge_base_class_minimal.o
# RUN: llvm-objdump --objc-meta-data --macho merge_base_class_swift_minimal_yes_merge.dylib | FileCheck %s --check-prefixes=YES_MERGE_INTO_BASE_SWIFT
#### Check merge categories enabled ###
diff --git a/lld/test/MachO/objc-relative-method-lists-simple.s b/lld/test/MachO/objc-relative-method-lists-simple.s
index 9f54b5a..c8646f5 100644
--- a/lld/test/MachO/objc-relative-method-lists-simple.s
+++ b/lld/test/MachO/objc-relative-method-lists-simple.s
@@ -3,20 +3,28 @@
# RUN: rm -rf %t; split-file %s %t && cd %t
## Compile a64_rel_dylib.o
-# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o a64_rel_dylib.o a64_simple_class.s
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos10.15 -o a64_rel_dylib.o a64_simple_class.s
## Test arm64 + relative method lists
-# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -objc_relative_method_lists
+# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64
# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_REL
## Test arm64 + relative method lists + dead-strip
-# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -objc_relative_method_lists -dead_strip
+# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -dead_strip
# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_REL
## Test arm64 + traditional method lists (no relative offsets)
# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -no_objc_relative_method_lists
# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_NO_REL
+## Test arm64 + relative method lists by explicitly adding `-objc_relative_method_lists`.
+# RUN: %lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -platform_version macOS 10.15 10.15 -objc_relative_method_lists
+# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_REL
+
+## Test arm64 + no relative method lists by default.
+# RUN: %lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -platform_version macOS 10.15 10.15
+# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_NO_REL
+
CHK_REL: Contents of (__DATA_CONST,__objc_classlist) section
CHK_REL-NEXT: _OBJC_CLASS_$_MyClass
@@ -125,7 +133,7 @@ CHK_NO_REL-NEXT: imp +[MyClass class_method_02]
.include "objc-macros.s"
.section __TEXT,__text,regular,pure_instructions
-.build_version macos, 11, 0
+.build_version macos, 10, 15
.objc_selector_def "-[MyClass instance_method_00]"
.objc_selector_def "-[MyClass instance_method_01]"
diff --git a/lld/test/wasm/lto/stub-library.s b/lld/test/wasm/lto/stub-library.s
index 20e2a62..36d6b7b 100644
--- a/lld/test/wasm/lto/stub-library.s
+++ b/lld/test/wasm/lto/stub-library.s
@@ -1,12 +1,21 @@
+## The function `bar` is declared in stub.so and depends on `foo` which is
+## defined in an LTO object. We also test the case where the LTO object is
+## with an archive file.
+## This verifies that stub library dependencies (which are required exports) can
+## be defined in LTO objects, even when they are within archive files.
+
# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown -o %t.o %s
-# RUN: llvm-as %S/Inputs/foo.ll -o %t1.o
-# RUN: wasm-ld %t.o %t1.o %p/Inputs/stub.so -o %t.wasm
+# RUN: mkdir -p %t
+# RUN: llvm-as %S/Inputs/foo.ll -o %t/foo.o
+# RUN: wasm-ld %t.o %t/foo.o %p/Inputs/stub.so -o %t.wasm
# RUN: obj2yaml %t.wasm | FileCheck %s
-# The function `bar` is declared in stub.so and depends on `foo`, which happens
-# be in an LTO object.
-# This verifies that stub library dependencies (required exports) can be defined
-# in LTO objects.
+## Run the same test but with foo.o inside of an archive file.
+# RUN: rm -f %t/libfoo.a
+# RUN: llvm-ar rcs %t/libfoo.a %t/foo.o
+# RUN: wasm-ld %t.o %t/libfoo.a %p/Inputs/stub.so -o %t2.wasm
+# RUN: obj2yaml %t2.wasm | FileCheck %s
+
.functype bar () -> ()
.globl _start
diff --git a/lld/tools/lld/CMakeLists.txt b/lld/tools/lld/CMakeLists.txt
index 8498a91..630d38f 100644
--- a/lld/tools/lld/CMakeLists.txt
+++ b/lld/tools/lld/CMakeLists.txt
@@ -8,8 +8,8 @@ add_lld_tool(lld
SUPPORT_PLUGINS
GENERATE_DRIVER
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
-export_executable_symbols_for_plugins(lld)
function(lld_target_link_libraries target type)
if (TARGET obj.${target})
diff --git a/lld/wasm/Driver.cpp b/lld/wasm/Driver.cpp
index 8c83d17..5368fe7 100644
--- a/lld/wasm/Driver.cpp
+++ b/lld/wasm/Driver.cpp
@@ -949,6 +949,17 @@ static void processStubLibrariesPreLTO() {
auto* needed = symtab->find(dep);
if (needed ) {
needed->isUsedInRegularObj = true;
+ // Like with handleLibcall we have to extract any LTO archive
+ // members that might need to be exported due to stub library
+ // symbols being referenced. Without this the LTO object could be
+ // extracted during processStubLibraries, which is too late since
+ // LTO has already being performed at that point.
+ if (needed->isLazy() && isa<BitcodeFile>(needed->getFile())) {
+ if (!config->whyExtract.empty())
+ ctx.whyExtractRecords.emplace_back(toString(stub_file),
+ needed->getFile(), *needed);
+ cast<LazySymbol>(needed)->extract();
+ }
}
}
}
diff --git a/lld/wasm/InputFiles.cpp b/lld/wasm/InputFiles.cpp
index f3f0ef9..706ee25 100644
--- a/lld/wasm/InputFiles.cpp
+++ b/lld/wasm/InputFiles.cpp
@@ -744,7 +744,7 @@ Symbol *ObjFile::createUndefined(const WasmSymbol &sym, bool isCalledDirectly) {
llvm_unreachable("unknown symbol kind");
}
-StringRef strip(StringRef s) { return s.trim(' '); }
+static StringRef strip(StringRef s) { return s.trim(' '); }
void StubFile::parse() {
bool first = true;
@@ -761,7 +761,7 @@ void StubFile::parse() {
}
// Lines starting with # are considered comments
- if (line.starts_with("#"))
+ if (line.starts_with("#") || !line.size())
continue;
StringRef sym;
diff --git a/lldb/include/lldb/Host/Config.h.cmake b/lldb/include/lldb/Host/Config.h.cmake
index 3defa45..9e53853 100644
--- a/lldb/include/lldb/Host/Config.h.cmake
+++ b/lldb/include/lldb/Host/Config.h.cmake
@@ -33,6 +33,8 @@
#cmakedefine01 LLDB_ENABLE_LZMA
+#cmakedefine01 LLVM_ENABLE_CURL
+
#cmakedefine01 LLDB_ENABLE_CURSES
#cmakedefine01 CURSES_HAVE_NCURSES_CURSES_H
diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h
index a63d662..cf16fbc 100644
--- a/lldb/include/lldb/Target/Process.h
+++ b/lldb/include/lldb/Target/Process.h
@@ -1314,10 +1314,16 @@ public:
size_t GetThreadStatus(Stream &ostrm, bool only_threads_with_stop_reason,
uint32_t start_frame, uint32_t num_frames,
- uint32_t num_frames_with_source,
- bool stop_format);
+ uint32_t num_frames_with_source, bool stop_format);
- void SendAsyncInterrupt();
+ /// Send an async interrupt request.
+ ///
+ /// If \a thread is specified the async interrupt stop will be attributed to
+ /// the specified thread.
+ ///
+ /// \param[in] thread
+ /// The thread the async interrupt will be attributed to.
+ void SendAsyncInterrupt(Thread *thread = nullptr);
// Notify this process class that modules got loaded.
//
@@ -2867,6 +2873,17 @@ protected:
return std::nullopt;
}
+ /// Handle thread specific async interrupt and return the original thread
+ /// that requested the async interrupt. It can be null if original thread
+ /// has exited.
+ ///
+ /// \param[in] description
+ /// Returns the stop reason description of the async interrupt.
+ virtual lldb::ThreadSP
+ HandleThreadAsyncInterrupt(uint8_t signo, const std::string &description) {
+ return lldb::ThreadSP();
+ }
+
lldb::StateType GetPrivateState();
/// The "private" side of resuming a process. This doesn't alter the state
@@ -3153,6 +3170,11 @@ protected:
// Resume will only request a resume, using this
// flag to check.
+ lldb::tid_t m_interrupt_tid; /// The tid of the thread that issued the async
+ /// interrupt, used by thread plan timeout. It
+ /// can be LLDB_INVALID_THREAD_ID to indicate
+ /// user level async interrupt.
+
/// This is set at the beginning of Process::Finalize() to stop functions
/// from looking up or creating things during or after a finalize call.
std::atomic<bool> m_finalizing;
diff --git a/lldb/include/lldb/Target/StopInfo.h b/lldb/include/lldb/Target/StopInfo.h
index d1848fc..fae9036 100644
--- a/lldb/include/lldb/Target/StopInfo.h
+++ b/lldb/include/lldb/Target/StopInfo.h
@@ -123,6 +123,10 @@ public:
const char *description = nullptr,
std::optional<int> code = std::nullopt);
+ static lldb::StopInfoSP
+ CreateStopReasonWithInterrupt(Thread &thread, int signo,
+ const char *description);
+
static lldb::StopInfoSP CreateStopReasonToTrace(Thread &thread);
static lldb::StopInfoSP
diff --git a/lldb/include/lldb/Target/Thread.h b/lldb/include/lldb/Target/Thread.h
index 2ff1f50..aacc59c 100644
--- a/lldb/include/lldb/Target/Thread.h
+++ b/lldb/include/lldb/Target/Thread.h
@@ -58,6 +58,8 @@ public:
bool GetStepOutAvoidsNoDebug() const;
uint64_t GetMaxBacktraceDepth() const;
+
+ uint64_t GetSingleThreadPlanTimeout() const;
};
class Thread : public std::enable_shared_from_this<Thread>,
diff --git a/lldb/include/lldb/Target/ThreadPlan.h b/lldb/include/lldb/Target/ThreadPlan.h
index bf68a42..c336b6b 100644
--- a/lldb/include/lldb/Target/ThreadPlan.h
+++ b/lldb/include/lldb/Target/ThreadPlan.h
@@ -302,7 +302,8 @@ public:
eKindStepInRange,
eKindRunToAddress,
eKindStepThrough,
- eKindStepUntil
+ eKindStepUntil,
+ eKindSingleThreadTimeout,
};
virtual ~ThreadPlan();
@@ -395,6 +396,11 @@ public:
bool IsControllingPlan() { return m_is_controlling_plan; }
+ // Returns true if this plan is a leaf plan, meaning the plan will be popped
+ // during each stop if it does not explain the stop and re-pushed before
+ // resuming to stay at the top of the stack.
+ virtual bool IsLeafPlan() { return false; }
+
bool SetIsControllingPlan(bool value) {
bool old_value = m_is_controlling_plan;
m_is_controlling_plan = value;
@@ -483,6 +489,8 @@ public:
return m_takes_iteration_count;
}
+ virtual lldb::StateType GetPlanRunState() = 0;
+
protected:
// Constructors and Destructors
ThreadPlan(ThreadPlanKind kind, const char *name, Thread &thread,
@@ -522,8 +530,6 @@ protected:
GetThread().SetStopInfo(stop_reason_sp);
}
- virtual lldb::StateType GetPlanRunState() = 0;
-
bool IsUsuallyUnexplainedStopReason(lldb::StopReason);
Status m_status;
diff --git a/lldb/include/lldb/Target/ThreadPlanSingleThreadTimeout.h b/lldb/include/lldb/Target/ThreadPlanSingleThreadTimeout.h
new file mode 100644
index 0000000..5eff118
--- /dev/null
+++ b/lldb/include/lldb/Target/ThreadPlanSingleThreadTimeout.h
@@ -0,0 +1,110 @@
+//===-- ThreadPlanSingleThreadTimeout.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TARGET_THREADPLANSINGLETHREADTIMEOUT_H
+#define LLDB_TARGET_THREADPLANSINGLETHREADTIMEOUT_H
+
+#include "lldb/Target/Thread.h"
+#include "lldb/Target/ThreadPlan.h"
+#include "lldb/Utility/Event.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/State.h"
+
+#include <chrono>
+#include <thread>
+
+namespace lldb_private {
+
+class ThreadPlanSingleThreadTimeout;
+//
+// Thread plan used by single thread execution to issue timeout. This is useful
+// to detect potential deadlock in single thread execution. The timeout measures
+// the elapsed time from the last internal stop and gets reset by each internal
+// stop to ensure we are accurately detecting execution not moving forward.
+// This means this thread plan may be created/destroyed multiple times by the
+// parent execution plan.
+//
+// When a timeout happens, the thread plan resolves the potential deadlock by
+// issuing a thread specific async interrupt to enter stop state, then execution
+// is resumed with all threads running to resolve the potential deadlock
+//
+class ThreadPlanSingleThreadTimeout : public ThreadPlan {
+ enum class State {
+ WaitTimeout, // Waiting for timeout.
+ AsyncInterrupt, // Async interrupt has been issued.
+ Done, // Finished resume all threads.
+ };
+
+public:
+ // TODO: allow timeout to be set on per thread plan basis.
+ struct TimeoutInfo {
+ // Whether there is a ThreadPlanSingleThreadTimeout instance alive.
+ bool m_isAlive = false;
+ ThreadPlanSingleThreadTimeout::State m_last_state = State::WaitTimeout;
+ };
+ using TimeoutInfoSP =
+ std::shared_ptr<ThreadPlanSingleThreadTimeout::TimeoutInfo>;
+
+ ~ThreadPlanSingleThreadTimeout() override;
+
+ // If input \param thread is running in single thread mode, push a
+ // new ThreadPlanSingleThreadTimeout based on timeout setting from fresh new
+ // state. The reference of \param info is passed in so that when
+ // ThreadPlanSingleThreadTimeout got popped its last state can be stored
+ // in it for future resume.
+ static void PushNewWithTimeout(Thread &thread, TimeoutInfoSP &info);
+
+ // Push a new ThreadPlanSingleThreadTimeout by restoring state from
+ // input \param info and resume execution.
+ static void ResumeFromPrevState(Thread &thread, TimeoutInfoSP &info);
+
+ void GetDescription(Stream *s, lldb::DescriptionLevel level) override;
+ bool ValidatePlan(Stream *error) override { return true; }
+ bool WillStop() override;
+ void DidPop() override;
+
+ bool IsLeafPlan() override { return true; }
+ bool DoPlanExplainsStop(Event *event_ptr) override;
+
+ lldb::StateType GetPlanRunState() override;
+ static void TimeoutThreadFunc(ThreadPlanSingleThreadTimeout *self);
+
+ bool MischiefManaged() override;
+
+ bool ShouldStop(Event *event_ptr) override;
+ void SetStopOthers(bool new_value) override;
+ bool StopOthers() override;
+
+private:
+ ThreadPlanSingleThreadTimeout(Thread &thread, TimeoutInfoSP &info);
+
+ bool IsTimeoutAsyncInterrupt(Event *event_ptr);
+ bool HandleEvent(Event *event_ptr);
+ void HandleTimeout();
+ uint64_t GetRemainingTimeoutMilliSeconds();
+
+ static std::string StateToString(State state);
+
+ ThreadPlanSingleThreadTimeout(const ThreadPlanSingleThreadTimeout &) = delete;
+ const ThreadPlanSingleThreadTimeout &
+ operator=(const ThreadPlanSingleThreadTimeout &) = delete;
+
+ TimeoutInfoSP m_info; // Reference to controlling ThreadPlan's TimeoutInfo.
+ State m_state;
+
+ // Lock for m_wakeup_cv and m_exit_flag between thread plan thread and timer
+ // thread
+ std::mutex m_mutex;
+ std::condition_variable m_wakeup_cv;
+ std::thread m_timer_thread;
+ std::chrono::steady_clock::time_point m_timeout_start;
+};
+
+} // namespace lldb_private
+
+#endif // LLDB_TARGET_THREADPLANSINGLETHREADTIMEOUT_H
diff --git a/lldb/include/lldb/Target/ThreadPlanStepOut.h b/lldb/include/lldb/Target/ThreadPlanStepOut.h
index b1d8769..013c675 100644
--- a/lldb/include/lldb/Target/ThreadPlanStepOut.h
+++ b/lldb/include/lldb/Target/ThreadPlanStepOut.h
@@ -30,6 +30,7 @@ public:
bool ValidatePlan(Stream *error) override;
bool ShouldStop(Event *event_ptr) override;
bool StopOthers() override;
+ void SetStopOthers(bool new_value) override { m_stop_others = new_value; }
lldb::StateType GetPlanRunState() override;
bool WillStop() override;
bool MischiefManaged() override;
diff --git a/lldb/include/lldb/Target/ThreadPlanStepOverRange.h b/lldb/include/lldb/Target/ThreadPlanStepOverRange.h
index 8585ac6..af29d18 100644
--- a/lldb/include/lldb/Target/ThreadPlanStepOverRange.h
+++ b/lldb/include/lldb/Target/ThreadPlanStepOverRange.h
@@ -13,11 +13,13 @@
#include "lldb/Target/StackID.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlanStepRange.h"
+#include "lldb/Target/TimeoutResumeAll.h"
namespace lldb_private {
class ThreadPlanStepOverRange : public ThreadPlanStepRange,
- ThreadPlanShouldStopHere {
+ ThreadPlanShouldStopHere,
+ TimeoutResumeAll {
public:
ThreadPlanStepOverRange(Thread &thread, const AddressRange &range,
const SymbolContext &addr_context,
@@ -27,7 +29,9 @@ public:
~ThreadPlanStepOverRange() override;
void GetDescription(Stream *s, lldb::DescriptionLevel level) override;
+ void SetStopOthers(bool new_value) override;
bool ShouldStop(Event *event_ptr) override;
+ void DidPush() override;
protected:
bool DoPlanExplainsStop(Event *event_ptr) override;
@@ -44,6 +48,7 @@ private:
bool IsEquivalentContext(const SymbolContext &context);
bool m_first_resume;
+ lldb::RunMode m_run_mode;
ThreadPlanStepOverRange(const ThreadPlanStepOverRange &) = delete;
const ThreadPlanStepOverRange &
diff --git a/lldb/include/lldb/Target/ThreadPlanStepRange.h b/lldb/include/lldb/Target/ThreadPlanStepRange.h
index 2fe8852..001f3d9 100644
--- a/lldb/include/lldb/Target/ThreadPlanStepRange.h
+++ b/lldb/include/lldb/Target/ThreadPlanStepRange.h
@@ -58,8 +58,15 @@ protected:
// run' plan, then just single step.
bool SetNextBranchBreakpoint();
+ // Whether the input stop info is caused by the next branch breakpoint.
+ // Note: this does not check if branch breakpoint site is shared by other
+ // breakpoints or not.
+ bool IsNextBranchBreakpointStop(lldb::StopInfoSP stop_info_sp);
+
void ClearNextBranchBreakpoint();
+ void ClearNextBranchBreakpointExplainedStop();
+
bool NextRangeBreakpointExplainsStop(lldb::StopInfoSP stop_info_sp);
SymbolContext m_addr_context;
diff --git a/lldb/include/lldb/Target/TimeoutResumeAll.h b/lldb/include/lldb/Target/TimeoutResumeAll.h
new file mode 100644
index 0000000..9a1a6c4
--- /dev/null
+++ b/lldb/include/lldb/Target/TimeoutResumeAll.h
@@ -0,0 +1,43 @@
+//===-- TimeoutResumeAll.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TARGET_TIMEOUTRESUMEALL_H
+#define LLDB_TARGET_TIMEOUTRESUMEALL_H
+
+#include "lldb/Target/ThreadPlanSingleThreadTimeout.h"
+
+namespace lldb_private {
+
+// Mixin class that provides the capability for ThreadPlan to support single
+// thread execution that resumes all threads after a timeout.
+// Opt-in thread plan should call PushNewTimeout() in its DidPush() and
+// ResumeWithTimeout() during DoWillResume().
+class TimeoutResumeAll {
+public:
+ TimeoutResumeAll(Thread &thread)
+ : m_thread(thread),
+ m_timeout_info(
+ std::make_shared<ThreadPlanSingleThreadTimeout::TimeoutInfo>()) {}
+
+ void PushNewTimeout() {
+ ThreadPlanSingleThreadTimeout::PushNewWithTimeout(m_thread, m_timeout_info);
+ }
+
+ void ResumeWithTimeout() {
+ ThreadPlanSingleThreadTimeout::ResumeFromPrevState(m_thread,
+ m_timeout_info);
+ }
+
+private:
+ Thread &m_thread;
+ ThreadPlanSingleThreadTimeout::TimeoutInfoSP m_timeout_info;
+};
+
+} // namespace lldb_private
+
+#endif // LLDB_TARGET_TIMEOUTRESUMEALL_H
diff --git a/lldb/include/lldb/Utility/AddressableBits.h b/lldb/include/lldb/Utility/AddressableBits.h
index 0d27c35..8c7a1ec 100644
--- a/lldb/include/lldb/Utility/AddressableBits.h
+++ b/lldb/include/lldb/Utility/AddressableBits.h
@@ -12,6 +12,8 @@
#include "lldb/lldb-forward.h"
#include "lldb/lldb-public.h"
+#include <cstdint>
+
namespace lldb_private {
/// \class AddressableBits AddressableBits.h "lldb/Core/AddressableBits.h"
diff --git a/lldb/include/lldb/lldb-enumerations.h b/lldb/include/lldb/lldb-enumerations.h
index 74ff458..7bfde8b 100644
--- a/lldb/include/lldb/lldb-enumerations.h
+++ b/lldb/include/lldb/lldb-enumerations.h
@@ -253,6 +253,7 @@ enum StopReason {
eStopReasonFork,
eStopReasonVFork,
eStopReasonVForkDone,
+ eStopReasonInterrupt, ///< Thread requested interrupt
};
/// Command Return Status Types.
diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py
index ecc7b81..0e8ca15 100644
--- a/lldb/packages/Python/lldbsuite/test/decorators.py
+++ b/lldb/packages/Python/lldbsuite/test/decorators.py
@@ -1053,6 +1053,10 @@ def _get_bool_config_skip_if_decorator(key):
return unittest.skipIf(not have, "requires " + key)
+def skipIfCurlSupportMissing(func):
+ return _get_bool_config_skip_if_decorator("curl")(func)
+
+
def skipIfCursesSupportMissing(func):
return _get_bool_config_skip_if_decorator("curses")(func)
diff --git a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
index be3ad68..1ba3f84 100644
--- a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
+++ b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
@@ -112,7 +112,7 @@ $(error "C compiler is not specified. Please run tests through lldb-dotest or li
endif
#----------------------------------------------------------------------
-# Handle SDKROOT on Darwin
+# Handle SDKROOT for the cross platform builds.
#----------------------------------------------------------------------
ifeq "$(OS)" "Darwin"
@@ -120,6 +120,18 @@ ifeq "$(OS)" "Darwin"
# We haven't otherwise set the SDKROOT, so set it now to macosx
SDKROOT := $(shell xcrun --sdk macosx --show-sdk-path)
endif
+ SYSROOT_FLAGS := -isysroot "$(SDKROOT)"
+ GCC_TOOLCHAIN_FLAGS :=
+else
+ ifneq "$(SDKROOT)" ""
+ SYSROOT_FLAGS := --sysroot "$(SDKROOT)"
+ GCC_TOOLCHAIN_FLAGS := --gcc-toolchain="$(SDKROOT)/usr"
+ else
+ # Do not set up these options if SDKROOT was not specified.
+ # This is a regular build in that case (or Android).
+ SYSROOT_FLAGS :=
+ GCC_TOOLCHAIN_FLAGS :=
+ endif
endif
#----------------------------------------------------------------------
@@ -190,6 +202,12 @@ else
ifeq "$(SPLIT_DEBUG_SYMBOLS)" "YES"
DSYM = $(EXE).debug
endif
+
+ ifeq "$(MAKE_DWP)" "YES"
+ MAKE_DWO := YES
+ DWP_NAME = $(EXE).dwp
+ DYLIB_DWP_NAME = $(DYLIB_NAME).dwp
+ endif
endif
LIMIT_DEBUG_INFO_FLAGS =
@@ -210,20 +228,15 @@ endif
DEBUG_INFO_FLAG ?= -g
CFLAGS ?= $(DEBUG_INFO_FLAG) -O0
-
-ifeq "$(OS)" "Darwin"
- ifneq "$(SDKROOT)" ""
- CFLAGS += -isysroot "$(SDKROOT)"
- endif
-endif
+CFLAGS += $(SYSROOT_FLAGS)
ifeq "$(OS)" "Darwin"
CFLAGS += $(ARCHFLAG) $(ARCH) $(FRAMEWORK_INCLUDES)
else
CFLAGS += $(ARCHFLAG)$(ARCH)
endif
-CFLAGS += -I$(LLDB_BASE_DIR)include -I$(LLDB_OBJ_ROOT)/include
+CFLAGS += -I$(LLDB_BASE_DIR)/include -I$(LLDB_OBJ_ROOT)/include
CFLAGS += -I$(SRCDIR) -I$(THIS_FILE_DIR)
ifndef NO_TEST_COMMON_H
@@ -234,9 +247,9 @@ CFLAGS += $(NO_LIMIT_DEBUG_INFO_FLAGS) $(ARCH_CFLAGS)
# Use this one if you want to build one part of the result without debug information:
ifeq "$(OS)" "Darwin"
- CFLAGS_NO_DEBUG = -O0 $(ARCHFLAG) $(ARCH) $(FRAMEWORK_INCLUDES) $(ARCH_CFLAGS) $(CFLAGS_EXTRAS) -isysroot "$(SDKROOT)"
+ CFLAGS_NO_DEBUG = -O0 $(ARCHFLAG) $(ARCH) $(FRAMEWORK_INCLUDES) $(ARCH_CFLAGS) $(CFLAGS_EXTRAS) $(SYSROOT_FLAGS)
else
- CFLAGS_NO_DEBUG = -O0 $(ARCHFLAG)$(ARCH) $(FRAMEWORK_INCLUDES) $(ARCH_CFLAGS) $(CFLAGS_EXTRAS)
+ CFLAGS_NO_DEBUG = -O0 $(ARCHFLAG)$(ARCH) $(FRAMEWORK_INCLUDES) $(ARCH_CFLAGS) $(CFLAGS_EXTRAS) $(SYSROOT_FLAGS)
endif
ifeq "$(MAKE_DWO)" "YES"
@@ -267,7 +280,9 @@ endif
CFLAGS += $(CFLAGS_EXTRAS)
CXXFLAGS += -std=c++11 $(CFLAGS) $(ARCH_CXXFLAGS)
LD = $(CC)
-LDFLAGS ?= $(CFLAGS)
+# Copy common options to the linker flags (dwarf, arch. & etc).
+# Note: we get some 'garbage' options for linker here (such as -I, --isystem & etc).
+LDFLAGS += $(CFLAGS)
LDFLAGS += $(LD_EXTRAS) $(ARCH_LDFLAGS)
ifeq (,$(filter $(OS), Windows_NT Android Darwin))
ifneq (,$(filter YES,$(ENABLE_THREADS)))
@@ -338,6 +353,17 @@ ifneq "$(OS)" "Darwin"
OBJCOPY ?= $(call replace_cc_with,objcopy)
ARCHIVER ?= $(call replace_cc_with,ar)
+ # Look for llvm-dwp or gnu dwp
+ DWP ?= $(call replace_cc_with,llvm-dwp)
+ ifeq ($(wildcard $(DWP)),)
+ DWP = $(call replace_cc_with,dwp)
+ ifeq ($(wildcard $(DWP)),)
+ DWP = $(shell command -v llvm-dwp 2> /dev/null)
+ ifeq ($(wildcard $(DWP)),)
+ DWP = $(shell command -v dwp 2> /dev/null)
+ endif
+ endif
+ endif
override AR = $(ARCHIVER)
endif
@@ -378,11 +404,26 @@ ifeq (1, $(USE_SYSTEM_STDLIB))
endif
endif
+ifeq (,$(filter 1, $(USE_LIBSTDCPP) $(USE_LIBCPP) $(USE_SYSTEM_STDLIB)))
+ # If no explicit C++ library request was made, but we have paths to a custom libcxx, use
+ # them. Otherwise, use the system library by default.
+ ifneq ($(and $(LIBCPP_INCLUDE_DIR), $(LIBCPP_LIBRARY_DIR)),)
+ CXXFLAGS += -nostdlib++ -nostdinc++ -cxx-isystem $(LIBCPP_INCLUDE_DIR)
+ ifneq "$(LIBCPP_INCLUDE_TARGET_DIR)" ""
+ CXXFLAGS += -cxx-isystem $(LIBCPP_INCLUDE_TARGET_DIR)
+ endif
+ LDFLAGS += -L$(LIBCPP_LIBRARY_DIR) -Wl,-rpath,$(LIBCPP_LIBRARY_DIR) -lc++
+ else
+ USE_SYSTEM_STDLIB := 1
+ endif
+endif
+
ifeq (1,$(USE_LIBSTDCPP))
# Clang requires an extra flag: -stdlib=libstdc++
ifneq (,$(findstring clang,$(CC)))
- CXXFLAGS += -stdlib=libstdc++
- LDFLAGS += -stdlib=libstdc++
+ # Force clang looking for the gcc's headers at specific rootfs folder.
+ CXXFLAGS += -stdlib=libstdc++ $(GCC_TOOLCHAIN_FLAGS)
+ LDFLAGS += -stdlib=libstdc++ $(GCC_TOOLCHAIN_FLAGS)
endif
endif
@@ -416,21 +457,15 @@ ifeq (1, $(USE_SYSTEM_STDLIB))
endif
CXXFLAGS += -nostdlib++ -nostdinc++ -cxx-isystem $(SDKROOT)/usr/include/c++/v1
LDFLAGS += -L$(SDKROOT)/usr/lib -Wl,-rpath,$(SDKROOT)/usr/lib -lc++
+ else
+ ifneq (,$(findstring clang,$(CC)))
+ # Force clang looking for the gcc's headers at specific rootfs folder.
+ CXXFLAGS += $(GCC_TOOLCHAIN_FLAGS)
+ LDFLAGS += $(GCC_TOOLCHAIN_FLAGS)
+ endif
endif
endif
-# If no explicit request was made, but we have paths to a custom libcxx, use
-# them.
-ifeq ($(or $(USE_LIBSTDCPP), $(USE_LIBCPP), $(USE_SYSTEM_STDLIB)),)
- ifneq ($(and $(LIBCPP_INCLUDE_DIR), $(LIBCPP_LIBRARY_DIR)),)
- CXXFLAGS += -nostdlib++ -nostdinc++ -cxx-isystem $(LIBCPP_INCLUDE_DIR)
- ifneq "$(LIBCPP_INCLUDE_TARGET_DIR)" ""
- CXXFLAGS += -cxx-isystem $(LIBCPP_INCLUDE_TARGET_DIR)
- endif
- LDFLAGS += -L$(LIBCPP_LIBRARY_DIR) -Wl,-rpath,$(LIBCPP_LIBRARY_DIR) -lc++
- endif
-endif
-
#----------------------------------------------------------------------
# Additional system libraries
#----------------------------------------------------------------------
@@ -508,6 +543,10 @@ ifneq "$(CXX)" ""
endif
endif
+ifeq "$(GEN_GNU_BUILD_ID)" "YES"
+ LDFLAGS += -Wl,--build-id
+endif
+
#----------------------------------------------------------------------
# DYLIB_ONLY variable can be used to skip the building of a.out.
# See the sections below regarding dSYM file as well as the building of
@@ -546,11 +585,18 @@ else
endif
else
ifeq "$(SPLIT_DEBUG_SYMBOLS)" "YES"
+ifeq "$(SAVE_FULL_DEBUG_BINARY)" "YES"
+ cp "$(EXE)" "$(EXE).unstripped"
+endif
$(OBJCOPY) --only-keep-debug "$(EXE)" "$(DSYM)"
$(OBJCOPY) --strip-debug --add-gnu-debuglink="$(DSYM)" "$(EXE)" "$(EXE)"
endif
+ifeq "$(MAKE_DWP)" "YES"
+ $(DWP) -o "$(DWP_NAME)" $(DWOS)
+endif
endif
+
#----------------------------------------------------------------------
# Make the dylib
#----------------------------------------------------------------------
@@ -591,9 +637,15 @@ endif
else
$(LD) $(DYLIB_OBJECTS) $(LDFLAGS) -shared -o "$(DYLIB_FILENAME)"
ifeq "$(SPLIT_DEBUG_SYMBOLS)" "YES"
+ifeq "$(SAVE_FULL_DEBUG_BINARY)" "YES"
+ cp "$(DYLIB_FILENAME)" "$(DYLIB_FILENAME).unstripped"
+endif
$(OBJCOPY) --only-keep-debug "$(DYLIB_FILENAME)" "$(DYLIB_FILENAME).debug"
$(OBJCOPY) --strip-debug --add-gnu-debuglink="$(DYLIB_FILENAME).debug" "$(DYLIB_FILENAME)" "$(DYLIB_FILENAME)"
endif
+ifeq "$(MAKE_DWP)" "YES"
+ $(DWP) -o $(DYLIB_DWP_FILE) $(DYLIB_DWOS)
+endif
endif
#----------------------------------------------------------------------
diff --git a/lldb/source/API/SBDebugger.cpp b/lldb/source/API/SBDebugger.cpp
index 29da7d3..fb035a3 100644
--- a/lldb/source/API/SBDebugger.cpp
+++ b/lldb/source/API/SBDebugger.cpp
@@ -776,6 +776,9 @@ SBStructuredData SBDebugger::GetBuildConfiguration() {
*config_up, "xml", XMLDocument::XMLEnabled(),
"A boolean value that indicates if XML support is enabled in LLDB");
AddBoolConfigEntry(
+ *config_up, "curl", LLVM_ENABLE_CURL,
+ "A boolean value that indicates if CURL support is enabled in LLDB");
+ AddBoolConfigEntry(
*config_up, "curses", LLDB_ENABLE_CURSES,
"A boolean value that indicates if curses support is enabled in LLDB");
AddBoolConfigEntry(
@@ -1724,20 +1727,20 @@ SBDebugger::LoadTraceFromFile(SBError &error,
void SBDebugger::RequestInterrupt() {
LLDB_INSTRUMENT_VA(this);
-
+
if (m_opaque_sp)
- m_opaque_sp->RequestInterrupt();
+ m_opaque_sp->RequestInterrupt();
}
void SBDebugger::CancelInterruptRequest() {
LLDB_INSTRUMENT_VA(this);
-
+
if (m_opaque_sp)
- m_opaque_sp->CancelInterruptRequest();
+ m_opaque_sp->CancelInterruptRequest();
}
bool SBDebugger::InterruptRequested() {
LLDB_INSTRUMENT_VA(this);
-
+
if (m_opaque_sp)
return m_opaque_sp->InterruptRequested();
return false;
diff --git a/lldb/source/API/SBStructuredData.cpp b/lldb/source/API/SBStructuredData.cpp
index b18fc56..78afdc6 100644
--- a/lldb/source/API/SBStructuredData.cpp
+++ b/lldb/source/API/SBStructuredData.cpp
@@ -86,7 +86,12 @@ lldb::SBError SBStructuredData::SetFromJSON(lldb::SBStream &stream) {
StructuredData::ParseJSON(stream.GetData());
m_impl_up->SetObjectSP(json_obj);
- if (!json_obj || json_obj->GetType() != eStructuredDataTypeDictionary)
+ static constexpr StructuredDataType unsupported_type[] = {
+ eStructuredDataTypeInvalid,
+ eStructuredDataTypeGeneric,
+ };
+
+ if (!json_obj || llvm::is_contained(unsupported_type, json_obj->GetType()))
error.SetErrorString("Invalid Syntax");
return error;
}
diff --git a/lldb/source/API/SBThread.cpp b/lldb/source/API/SBThread.cpp
index 5364336..bda9810 100644
--- a/lldb/source/API/SBThread.cpp
+++ b/lldb/source/API/SBThread.cpp
@@ -192,6 +192,9 @@ size_t SBThread::GetStopReasonDataCount() {
case eStopReasonSignal:
return 1;
+ case eStopReasonInterrupt:
+ return 1;
+
case eStopReasonException:
return 1;
@@ -261,6 +264,9 @@ uint64_t SBThread::GetStopReasonDataAtIndex(uint32_t idx) {
case eStopReasonSignal:
return stop_info_sp->GetValue();
+ case eStopReasonInterrupt:
+ return stop_info_sp->GetValue();
+
case eStopReasonException:
return stop_info_sp->GetValue();
diff --git a/lldb/source/Core/Progress.cpp b/lldb/source/Core/Progress.cpp
index 1a779e2dd..e0ba1a6 100644
--- a/lldb/source/Core/Progress.cpp
+++ b/lldb/source/Core/Progress.cpp
@@ -45,8 +45,7 @@ Progress::~Progress() {
// Make sure to always report progress completed when this object is
// destructed so it indicates the progress dialog/activity should go away.
std::lock_guard<std::mutex> guard(m_mutex);
- if (!m_completed)
- m_completed = m_total;
+ m_completed = m_total;
ReportProgress();
// Report to the ProgressManager if that subsystem is enabled.
diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp
index fc07168..71c928e 100644
--- a/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -2513,7 +2513,7 @@ bool CommandInterpreter::DidProcessStopAbnormally() const {
const StopReason reason = stop_info->GetStopReason();
if (reason == eStopReasonException ||
reason == eStopReasonInstrumentation ||
- reason == eStopReasonProcessorTrace)
+ reason == eStopReasonProcessorTrace || reason == eStopReasonInterrupt)
return true;
if (reason == eStopReasonSignal) {
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
index 44071d1..adf13ff 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
@@ -84,8 +84,7 @@ clang::Decl *ClangASTImporter::CopyDecl(clang::ASTContext *dst_ast,
LLDB_LOG_ERROR(log, result.takeError(), "Couldn't import decl: {0}");
if (log) {
lldb::user_id_t user_id = LLDB_INVALID_UID;
- ClangASTMetadata *metadata = GetDeclMetadata(decl);
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata = GetDeclMetadata(decl))
user_id = metadata->GetUserID();
if (NamedDecl *named_decl = dyn_cast<NamedDecl>(decl))
@@ -950,7 +949,8 @@ bool ClangASTImporter::RequireCompleteType(clang::QualType type) {
return true;
}
-ClangASTMetadata *ClangASTImporter::GetDeclMetadata(const clang::Decl *decl) {
+std::optional<ClangASTMetadata>
+ClangASTImporter::GetDeclMetadata(const clang::Decl *decl) {
DeclOrigin decl_origin = GetDeclOrigin(decl);
if (decl_origin.Valid()) {
@@ -1105,7 +1105,7 @@ ClangASTImporter::ASTImporterDelegate::ImportImpl(Decl *From) {
// If we have a forcefully completed type, try to find an actual definition
// for it in other modules.
- const ClangASTMetadata *md = m_main.GetDeclMetadata(From);
+ std::optional<ClangASTMetadata> md = m_main.GetDeclMetadata(From);
auto *td = dyn_cast<TagDecl>(From);
if (td && md && md->IsForcefullyCompleted()) {
Log *log = GetLog(LLDBLog::Expressions);
@@ -1284,8 +1284,7 @@ void ClangASTImporter::ASTImporterDelegate::Imported(clang::Decl *from,
}
lldb::user_id_t user_id = LLDB_INVALID_UID;
- ClangASTMetadata *metadata = m_main.GetDeclMetadata(from);
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata = m_main.GetDeclMetadata(from))
user_id = metadata->GetUserID();
if (log) {
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
index bc962e5..6231f0f 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
@@ -189,7 +189,7 @@ public:
/// is only a shallow clone that lacks any contents.
void SetDeclOrigin(const clang::Decl *decl, clang::Decl *original_decl);
- ClangASTMetadata *GetDeclMetadata(const clang::Decl *decl);
+ std::optional<ClangASTMetadata> GetDeclMetadata(const clang::Decl *decl);
//
// Namespace maps
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
index 2a8bdf2..f41323d 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
@@ -625,10 +625,6 @@ ClangExpressionParser::ClangExpressionParser(
// Make sure clang uses the same VFS as LLDB.
m_compiler->createFileManager(FileSystem::Instance().GetVirtualFileSystem());
- std::string abi;
- ArchSpec target_arch;
- target_arch = target_sp->GetArchitecture();
-
// 2. Configure the compiler with a set of default options that are
// appropriate for most situations.
SetupTargetOpts(*m_compiler, *target_sp);
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangUserExpression.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangUserExpression.cpp
index 35038a5..f096566 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangUserExpression.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangUserExpression.cpp
@@ -219,9 +219,10 @@ void ClangUserExpression::ScanContext(ExecutionContext &exe_ctx, Status &err) {
// whatever runtime the debug info says the object pointer belongs to. Do
// that here.
- ClangASTMetadata *metadata =
- TypeSystemClang::DeclContextGetMetaData(decl_context, function_decl);
- if (metadata && metadata->HasObjectPtr()) {
+ if (std::optional<ClangASTMetadata> metadata =
+ TypeSystemClang::DeclContextGetMetaData(decl_context,
+ function_decl);
+ metadata && metadata->HasObjectPtr()) {
lldb::LanguageType language = metadata->GetObjectPtrLanguage();
if (language == lldb::eLanguageTypeC_plus_plus) {
if (m_enforce_valid_object) {
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp
index 6894cdc..f3a008f 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp
@@ -398,9 +398,9 @@ bool AppleObjCDeclVendor::FinishDecl(clang::ObjCInterfaceDecl *interface_decl) {
Log *log(
GetLog(LLDBLog::Expressions)); // FIXME - a more appropriate log channel?
- ClangASTMetadata *metadata = m_ast_ctx->GetMetadata(interface_decl);
ObjCLanguageRuntime::ObjCISA objc_isa = 0;
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata =
+ m_ast_ctx->GetMetadata(interface_decl))
objc_isa = metadata->GetISAPtr();
if (!objc_isa)
@@ -559,8 +559,8 @@ uint32_t AppleObjCDeclVendor::FindDecls(ConstString name, bool append,
ast_ctx.getObjCInterfaceType(result_iface_decl);
uint64_t isa_value = LLDB_INVALID_ADDRESS;
- ClangASTMetadata *metadata = m_ast_ctx->GetMetadata(result_iface_decl);
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata =
+ m_ast_ctx->GetMetadata(result_iface_decl))
isa_value = metadata->GetISAPtr();
LLDB_LOGF(log,
diff --git a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
index 08d5f50..a0b08a2 100644
--- a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
+++ b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
@@ -714,6 +714,8 @@ static const char *GetStopReasonString(StopReason stop_reason) {
return "vfork";
case eStopReasonVForkDone:
return "vforkdone";
+ case eStopReasonInterrupt:
+ return "async interrupt";
case eStopReasonInstrumentation:
case eStopReasonInvalid:
case eStopReasonPlanComplete:
diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
index 604c923..6f9c2cc 100644
--- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -1730,14 +1730,24 @@ ThreadSP ProcessGDBRemote::SetThreadStopInfo(
thread_sp = memory_thread_sp;
if (exc_type != 0) {
- const size_t exc_data_size = exc_data.size();
-
- thread_sp->SetStopInfo(
- StopInfoMachException::CreateStopReasonWithMachException(
- *thread_sp, exc_type, exc_data_size,
- exc_data_size >= 1 ? exc_data[0] : 0,
- exc_data_size >= 2 ? exc_data[1] : 0,
- exc_data_size >= 3 ? exc_data[2] : 0));
+ // For thread plan async interrupt, creating stop info on the
+ // original async interrupt request thread instead. If interrupt thread
+ // does not exist anymore we fallback to current signal receiving thread
+ // instead.
+ ThreadSP interrupt_thread;
+ if (m_interrupt_tid != LLDB_INVALID_THREAD_ID)
+ interrupt_thread = HandleThreadAsyncInterrupt(signo, description);
+ if (interrupt_thread)
+ thread_sp = interrupt_thread;
+ else {
+ const size_t exc_data_size = exc_data.size();
+ thread_sp->SetStopInfo(
+ StopInfoMachException::CreateStopReasonWithMachException(
+ *thread_sp, exc_type, exc_data_size,
+ exc_data_size >= 1 ? exc_data[0] : 0,
+ exc_data_size >= 2 ? exc_data[1] : 0,
+ exc_data_size >= 3 ? exc_data[2] : 0));
+ }
} else {
bool handled = false;
bool did_exec = false;
@@ -1936,9 +1946,20 @@ ThreadSP ProcessGDBRemote::SetThreadStopInfo(
*thread_sp, signo, description.c_str()));
}
}
- if (!handled)
- thread_sp->SetStopInfo(StopInfo::CreateStopReasonWithSignal(
- *thread_sp, signo, description.c_str()));
+ if (!handled) {
+ // For thread plan async interrupt, creating stop info on the
+ // original async interrupt request thread instead. If interrupt
+ // thread does not exist anymore we fallback to current signal
+ // receiving thread instead.
+ ThreadSP interrupt_thread;
+ if (m_interrupt_tid != LLDB_INVALID_THREAD_ID)
+ interrupt_thread = HandleThreadAsyncInterrupt(signo, description);
+ if (interrupt_thread)
+ thread_sp = interrupt_thread;
+ else
+ thread_sp->SetStopInfo(StopInfo::CreateStopReasonWithSignal(
+ *thread_sp, signo, description.c_str()));
+ }
}
if (!description.empty()) {
@@ -1957,6 +1978,24 @@ ThreadSP ProcessGDBRemote::SetThreadStopInfo(
return thread_sp;
}
+ThreadSP
+ProcessGDBRemote::HandleThreadAsyncInterrupt(uint8_t signo,
+ const std::string &description) {
+ ThreadSP thread_sp;
+ {
+ std::lock_guard<std::recursive_mutex> guard(m_thread_list_real.GetMutex());
+ thread_sp = m_thread_list_real.FindThreadByProtocolID(m_interrupt_tid,
+ /*can_update=*/false);
+ }
+ if (thread_sp)
+ thread_sp->SetStopInfo(StopInfo::CreateStopReasonWithInterrupt(
+ *thread_sp, signo, description.c_str()));
+ // Clear m_interrupt_tid regardless we can find original interrupt thread or
+ // not.
+ m_interrupt_tid = LLDB_INVALID_THREAD_ID;
+ return thread_sp;
+}
+
lldb::ThreadSP
ProcessGDBRemote::SetThreadStopInfo(StructuredData::Dictionary *thread_dict) {
static constexpr llvm::StringLiteral g_key_tid("tid");
diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
index b44ffef..2492795 100644
--- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
+++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
@@ -440,6 +440,10 @@ private:
void HandleStopReply() override;
void HandleAsyncStructuredDataPacket(llvm::StringRef data) override;
+ lldb::ThreadSP
+ HandleThreadAsyncInterrupt(uint8_t signo,
+ const std::string &description) override;
+
void SetThreadPc(const lldb::ThreadSP &thread_sp, uint64_t index);
using ModuleCacheKey = std::pair<std::string, std::string>;
// KeyInfo for the cached module spec DenseMap.
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
index a4dcde1..1a13725 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
@@ -1803,7 +1803,7 @@ DWARFASTParserClang::ParseStructureLikeDIE(const SymbolContext &sc,
if (!clang_type) {
clang_type = m_ast.CreateRecordType(
containing_decl_ctx, GetOwningClangModule(die), attrs.accessibility,
- attrs.name.GetCString(), tag_decl_kind, attrs.class_language, &metadata,
+ attrs.name.GetCString(), tag_decl_kind, attrs.class_language, metadata,
attrs.exports_symbols);
}
@@ -1883,43 +1883,18 @@ public:
// required if you don't have an
// ivar decl
const char *property_setter_name, const char *property_getter_name,
- uint32_t property_attributes, const ClangASTMetadata *metadata)
+ uint32_t property_attributes, ClangASTMetadata metadata)
: m_class_opaque_type(class_opaque_type), m_property_name(property_name),
m_property_opaque_type(property_opaque_type),
m_property_setter_name(property_setter_name),
m_property_getter_name(property_getter_name),
- m_property_attributes(property_attributes) {
- if (metadata != nullptr) {
- m_metadata_up = std::make_unique<ClangASTMetadata>();
- *m_metadata_up = *metadata;
- }
- }
-
- DelayedAddObjCClassProperty(const DelayedAddObjCClassProperty &rhs) {
- *this = rhs;
- }
-
- DelayedAddObjCClassProperty &
- operator=(const DelayedAddObjCClassProperty &rhs) {
- m_class_opaque_type = rhs.m_class_opaque_type;
- m_property_name = rhs.m_property_name;
- m_property_opaque_type = rhs.m_property_opaque_type;
- m_property_setter_name = rhs.m_property_setter_name;
- m_property_getter_name = rhs.m_property_getter_name;
- m_property_attributes = rhs.m_property_attributes;
-
- if (rhs.m_metadata_up) {
- m_metadata_up = std::make_unique<ClangASTMetadata>();
- *m_metadata_up = *rhs.m_metadata_up;
- }
- return *this;
- }
+ m_property_attributes(property_attributes), m_metadata(metadata) {}
bool Finalize() {
return TypeSystemClang::AddObjCClassProperty(
m_class_opaque_type, m_property_name, m_property_opaque_type,
/*ivar_decl=*/nullptr, m_property_setter_name, m_property_getter_name,
- m_property_attributes, m_metadata_up.get());
+ m_property_attributes, m_metadata);
}
private:
@@ -1929,7 +1904,7 @@ private:
const char *m_property_setter_name;
const char *m_property_getter_name;
uint32_t m_property_attributes;
- std::unique_ptr<ClangASTMetadata> m_metadata_up;
+ ClangASTMetadata m_metadata;
};
bool DWARFASTParserClang::ParseTemplateDIE(
@@ -2721,10 +2696,10 @@ void DWARFASTParserClang::ParseObjCProperty(
ClangASTMetadata metadata;
metadata.SetUserID(die.GetID());
- delayed_properties.push_back(DelayedAddObjCClassProperty(
+ delayed_properties.emplace_back(
class_clang_type, propAttrs.prop_name,
member_type->GetLayoutCompilerType(), propAttrs.prop_setter_name,
- propAttrs.prop_getter_name, propAttrs.prop_attributes, &metadata));
+ propAttrs.prop_getter_name, propAttrs.prop_attributes, metadata);
}
llvm::Expected<llvm::APInt> DWARFASTParserClang::ExtractIntFromFormValue(
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 7cd3a33..7e0cf36 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -4331,26 +4331,38 @@ const std::shared_ptr<SymbolFileDWARFDwo> &SymbolFileDWARF::GetDwpSymbolFile() {
FileSpecList search_paths = Target::GetDefaultDebugFileSearchPaths();
ModuleSpec module_spec;
module_spec.GetFileSpec() = m_objfile_sp->GetFileSpec();
+ FileSpec dwp_filespec;
for (const auto &symfile : symfiles.files()) {
module_spec.GetSymbolFileSpec() =
FileSpec(symfile.GetPath() + ".dwp", symfile.GetPathStyle());
LLDB_LOG(log, "Searching for DWP using: \"{0}\"",
module_spec.GetSymbolFileSpec());
- FileSpec dwp_filespec =
+ dwp_filespec =
PluginManager::LocateExecutableSymbolFile(module_spec, search_paths);
if (FileSystem::Instance().Exists(dwp_filespec)) {
- LLDB_LOG(log, "Found DWP file: \"{0}\"", dwp_filespec);
- DataBufferSP dwp_file_data_sp;
- lldb::offset_t dwp_file_data_offset = 0;
- ObjectFileSP dwp_obj_file = ObjectFile::FindPlugin(
- GetObjectFile()->GetModule(), &dwp_filespec, 0,
- FileSystem::Instance().GetByteSize(dwp_filespec), dwp_file_data_sp,
- dwp_file_data_offset);
- if (dwp_obj_file) {
- m_dwp_symfile = std::make_shared<SymbolFileDWARFDwo>(
- *this, dwp_obj_file, DIERef::k_file_index_mask);
- break;
- }
+ break;
+ }
+ }
+ if (!FileSystem::Instance().Exists(dwp_filespec)) {
+ LLDB_LOG(log, "No DWP file found locally");
+ // Fill in the UUID for the module we're trying to match for, so we can
+ // find the correct DWP file, as the Debuginfod plugin uses *only* this
+ // data to correctly match the DWP file with the binary.
+ module_spec.GetUUID() = m_objfile_sp->GetUUID();
+ dwp_filespec =
+ PluginManager::LocateExecutableSymbolFile(module_spec, search_paths);
+ }
+ if (FileSystem::Instance().Exists(dwp_filespec)) {
+ LLDB_LOG(log, "Found DWP file: \"{0}\"", dwp_filespec);
+ DataBufferSP dwp_file_data_sp;
+ lldb::offset_t dwp_file_data_offset = 0;
+ ObjectFileSP dwp_obj_file = ObjectFile::FindPlugin(
+ GetObjectFile()->GetModule(), &dwp_filespec, 0,
+ FileSystem::Instance().GetByteSize(dwp_filespec), dwp_file_data_sp,
+ dwp_file_data_offset);
+ if (dwp_obj_file) {
+ m_dwp_symfile = std::make_shared<SymbolFileDWARFDwo>(
+ *this, dwp_obj_file, DIERef::k_file_index_mask);
}
}
if (!m_dwp_symfile) {
diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
index b79d3e6..0c71df6 100644
--- a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
+++ b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
@@ -618,7 +618,7 @@ clang::QualType PdbAstBuilder::CreateRecordType(PdbTypeSymId id,
CompilerType ct = m_clang.CreateRecordType(
context, OptionalClangModuleID(), access, uname, llvm::to_underlying(ttk),
- lldb::eLanguageTypeC_plus_plus, &metadata);
+ lldb::eLanguageTypeC_plus_plus, metadata);
lldbassert(ct.IsValid());
diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
index 17c5f61..807ee5b 100644
--- a/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
+++ b/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
@@ -366,7 +366,7 @@ UdtRecordCompleter::AddMember(TypeSystemClang &clang, Member *field,
metadata.SetIsDynamicCXXType(false);
CompilerType record_ct = clang.CreateRecordType(
parent_decl_ctx, OptionalClangModuleID(), lldb::eAccessPublic, "",
- llvm::to_underlying(kind), lldb::eLanguageTypeC_plus_plus, &metadata);
+ llvm::to_underlying(kind), lldb::eLanguageTypeC_plus_plus, metadata);
TypeSystemClang::StartTagDeclarationDefinition(record_ct);
ClangASTImporter::LayoutInfo layout;
clang::DeclContext *decl_ctx = clang.GetDeclContextForType(record_ct);
diff --git a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
index d656ca3..fa3530a 100644
--- a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
+++ b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
@@ -420,7 +420,7 @@ lldb::TypeSP PDBASTParser::CreateLLDBTypeFromPDBType(const PDBSymbol &type) {
clang_type = m_ast.CreateRecordType(
decl_context, OptionalClangModuleID(), access, name, tag_type_kind,
- lldb::eLanguageTypeC_plus_plus, &metadata);
+ lldb::eLanguageTypeC_plus_plus, metadata);
assert(clang_type.IsValid());
auto record_decl =
diff --git a/lldb/source/Plugins/SymbolLocator/CMakeLists.txt b/lldb/source/Plugins/SymbolLocator/CMakeLists.txt
index ca96962..3367022 100644
--- a/lldb/source/Plugins/SymbolLocator/CMakeLists.txt
+++ b/lldb/source/Plugins/SymbolLocator/CMakeLists.txt
@@ -1,5 +1,10 @@
+# Order matters here: the first symbol locator prevents further searching.
+# For DWARF binaries that are both stripped and split, the Default plugin
+# will return the stripped binary when asked for the ObjectFile, which then
+# prevents an unstripped binary from being requested from the Debuginfod
+# provider.
+add_subdirectory(Debuginfod)
add_subdirectory(Default)
if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_subdirectory(DebugSymbols)
endif()
-add_subdirectory(Debuginfod)
diff --git a/lldb/source/Plugins/SymbolVendor/ELF/SymbolVendorELF.cpp b/lldb/source/Plugins/SymbolVendor/ELF/SymbolVendorELF.cpp
index b5fe35d..a2c3825 100644
--- a/lldb/source/Plugins/SymbolVendor/ELF/SymbolVendorELF.cpp
+++ b/lldb/source/Plugins/SymbolVendor/ELF/SymbolVendorELF.cpp
@@ -44,6 +44,24 @@ llvm::StringRef SymbolVendorELF::GetPluginDescriptionStatic() {
"executables.";
}
+// If this is needed elsewhere, it can be exported/moved.
+static bool IsDwpSymbolFile(const lldb::ModuleSP &module_sp,
+ const FileSpec &file_spec) {
+ DataBufferSP dwp_file_data_sp;
+ lldb::offset_t dwp_file_data_offset = 0;
+ // Try to create an ObjectFile from the file_spec.
+ ObjectFileSP dwp_obj_file = ObjectFile::FindPlugin(
+ module_sp, &file_spec, 0, FileSystem::Instance().GetByteSize(file_spec),
+ dwp_file_data_sp, dwp_file_data_offset);
+ // The presence of a debug_cu_index section is the key identifying feature of
+ // a DWP file. Make sure we don't fill in the section list on dwp_obj_file
+ // (by calling GetSectionList(false)) as this function could be called before
+ // we may have all the symbol files collected and available.
+ return dwp_obj_file && ObjectFileELF::classof(dwp_obj_file.get()) &&
+ dwp_obj_file->GetSectionList(false)->FindSectionByType(
+ eSectionTypeDWARFDebugCuIndex, false);
+}
+
// CreateInstance
//
// Platforms can register a callback to use when creating symbol vendors to
@@ -87,8 +105,20 @@ SymbolVendorELF::CreateInstance(const lldb::ModuleSP &module_sp,
FileSpecList search_paths = Target::GetDefaultDebugFileSearchPaths();
FileSpec dsym_fspec =
PluginManager::LocateExecutableSymbolFile(module_spec, search_paths);
- if (!dsym_fspec)
- return nullptr;
+ if (!dsym_fspec || IsDwpSymbolFile(module_sp, dsym_fspec)) {
+ // If we have a stripped binary or if we have a DWP file, SymbolLocator
+ // plugins may be able to give us an unstripped binary or an
+ // 'only-keep-debug' stripped file.
+ ModuleSpec unstripped_spec =
+ PluginManager::LocateExecutableObjectFile(module_spec);
+ if (!unstripped_spec)
+ return nullptr;
+ // The default SymbolLocator plugin returns the original binary if no other
+ // plugin finds something better.
+ if (unstripped_spec.GetFileSpec() == module_spec.GetFileSpec())
+ return nullptr;
+ dsym_fspec = unstripped_spec.GetFileSpec();
+ }
DataBufferSP dsym_file_data_sp;
lldb::offset_t dsym_file_data_offset = 0;
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 78e9eeb..6639466 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -1220,7 +1220,8 @@ TypeSystemClang::GetOrCreateClangModule(llvm::StringRef name,
CompilerType TypeSystemClang::CreateRecordType(
clang::DeclContext *decl_ctx, OptionalClangModuleID owning_module,
AccessType access_type, llvm::StringRef name, int kind,
- LanguageType language, ClangASTMetadata *metadata, bool exports_symbols) {
+ LanguageType language, std::optional<ClangASTMetadata> metadata,
+ bool exports_symbols) {
ASTContext &ast = getASTContext();
if (decl_ctx == nullptr)
@@ -1787,8 +1788,8 @@ bool TypeSystemClang::RecordHasFields(const RecordDecl *record_decl) {
// -flimit-debug-info instead of just seeing nothing if this is a base class
// (since we were hiding empty base classes), or nothing when you turn open
// an valiable whose type was incomplete.
- ClangASTMetadata *meta_data = GetMetadata(record_decl);
- if (meta_data && meta_data->IsForcefullyCompleted())
+ if (std::optional<ClangASTMetadata> meta_data = GetMetadata(record_decl);
+ meta_data && meta_data->IsForcefullyCompleted())
return true;
return false;
@@ -1799,7 +1800,7 @@ bool TypeSystemClang::RecordHasFields(const RecordDecl *record_decl) {
CompilerType TypeSystemClang::CreateObjCClass(
llvm::StringRef name, clang::DeclContext *decl_ctx,
OptionalClangModuleID owning_module, bool isForwardDecl, bool isInternal,
- ClangASTMetadata *metadata) {
+ std::optional<ClangASTMetadata> metadata) {
ASTContext &ast = getASTContext();
assert(!name.empty());
if (!decl_ctx)
@@ -2465,27 +2466,31 @@ void TypeSystemClang::SetMetadataAsUserID(const clang::Type *type,
}
void TypeSystemClang::SetMetadata(const clang::Decl *object,
- ClangASTMetadata &metadata) {
+ ClangASTMetadata metadata) {
m_decl_metadata[object] = metadata;
}
void TypeSystemClang::SetMetadata(const clang::Type *object,
- ClangASTMetadata &metadata) {
+ ClangASTMetadata metadata) {
m_type_metadata[object] = metadata;
}
-ClangASTMetadata *TypeSystemClang::GetMetadata(const clang::Decl *object) {
+std::optional<ClangASTMetadata>
+TypeSystemClang::GetMetadata(const clang::Decl *object) {
auto It = m_decl_metadata.find(object);
if (It != m_decl_metadata.end())
- return &It->second;
- return nullptr;
+ return It->second;
+
+ return std::nullopt;
}
-ClangASTMetadata *TypeSystemClang::GetMetadata(const clang::Type *object) {
+std::optional<ClangASTMetadata>
+TypeSystemClang::GetMetadata(const clang::Type *object) {
auto It = m_type_metadata.find(object);
if (It != m_type_metadata.end())
- return &It->second;
- return nullptr;
+ return It->second;
+
+ return std::nullopt;
}
void TypeSystemClang::SetCXXRecordDeclAccess(const clang::CXXRecordDecl *object,
@@ -2934,9 +2939,10 @@ bool TypeSystemClang::IsRuntimeGeneratedType(
clang::ObjCInterfaceDecl *result_iface_decl =
llvm::dyn_cast<clang::ObjCInterfaceDecl>(decl_ctx);
- ClangASTMetadata *ast_metadata = GetMetadata(result_iface_decl);
+ std::optional<ClangASTMetadata> ast_metadata = GetMetadata(result_iface_decl);
if (!ast_metadata)
return false;
+
return (ast_metadata->GetISAPtr() != 0);
}
@@ -3622,8 +3628,8 @@ bool TypeSystemClang::IsPossibleDynamicType(lldb::opaque_compiler_type_t type,
if (is_complete)
success = cxx_record_decl->isDynamicClass();
else {
- ClangASTMetadata *metadata = GetMetadata(cxx_record_decl);
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata =
+ GetMetadata(cxx_record_decl))
success = metadata->GetIsDynamicCXXType();
else {
is_complete = GetType(pointee_qual_type).GetCompleteType();
@@ -5326,7 +5332,8 @@ GetDynamicArrayInfo(TypeSystemClang &ast, SymbolFile *sym_file,
clang::QualType qual_type,
const ExecutionContext *exe_ctx) {
if (qual_type->isIncompleteArrayType())
- if (auto *metadata = ast.GetMetadata(qual_type.getTypePtr()))
+ if (std::optional<ClangASTMetadata> metadata =
+ ast.GetMetadata(qual_type.getTypePtr()))
return sym_file->GetDynamicArrayInfoForUID(metadata->GetUserID(),
exe_ctx);
return std::nullopt;
@@ -7980,7 +7987,7 @@ bool TypeSystemClang::AddObjCClassProperty(
const CompilerType &type, const char *property_name,
const CompilerType &property_clang_type, clang::ObjCIvarDecl *ivar_decl,
const char *property_setter_name, const char *property_getter_name,
- uint32_t property_attributes, ClangASTMetadata *metadata) {
+ uint32_t property_attributes, ClangASTMetadata metadata) {
if (!type || !property_clang_type.IsValid() || property_name == nullptr ||
property_name[0] == '\0')
return false;
@@ -8024,8 +8031,7 @@ bool TypeSystemClang::AddObjCClassProperty(
if (!property_decl)
return false;
- if (metadata)
- ast->SetMetadata(property_decl, *metadata);
+ ast->SetMetadata(property_decl, metadata);
class_interface_decl->addDecl(property_decl);
@@ -8117,8 +8123,7 @@ bool TypeSystemClang::AddObjCClassProperty(
SetMemberOwningModule(getter, class_interface_decl);
if (getter) {
- if (metadata)
- ast->SetMetadata(getter, *metadata);
+ ast->SetMetadata(getter, metadata);
getter->setMethodParams(clang_ast, llvm::ArrayRef<clang::ParmVarDecl *>(),
llvm::ArrayRef<clang::SourceLocation>());
@@ -8160,8 +8165,7 @@ bool TypeSystemClang::AddObjCClassProperty(
SetMemberOwningModule(setter, class_interface_decl);
if (setter) {
- if (metadata)
- ast->SetMetadata(setter, *metadata);
+ ast->SetMetadata(setter, metadata);
llvm::SmallVector<clang::ParmVarDecl *, 1> params;
params.push_back(clang::ParmVarDecl::Create(
@@ -8868,8 +8872,7 @@ void TypeSystemClang::DumpTypeDescription(lldb::opaque_compiler_type_t type,
CompilerType ct(weak_from_this(), type);
const clang::Type *clang_type = ClangUtil::GetQualType(ct).getTypePtr();
- ClangASTMetadata *metadata = GetMetadata(clang_type);
- if (metadata) {
+ if (std::optional<ClangASTMetadata> metadata = GetMetadata(clang_type)) {
metadata->Dump(&s);
}
}
@@ -9490,7 +9493,7 @@ bool TypeSystemClang::DeclContextIsClassMethod(void *opaque_decl_ctx) {
return true;
} else if (clang::FunctionDecl *fun_decl =
llvm::dyn_cast<clang::FunctionDecl>(decl_ctx)) {
- if (ClangASTMetadata *metadata = GetMetadata(fun_decl))
+ if (std::optional<ClangASTMetadata> metadata = GetMetadata(fun_decl))
return metadata->HasObjectPtr();
}
@@ -9543,7 +9546,7 @@ TypeSystemClang::DeclContextGetLanguage(void *opaque_decl_ctx) {
} else if (llvm::isa<clang::CXXMethodDecl>(decl_ctx)) {
return eLanguageTypeC_plus_plus;
} else if (auto *fun_decl = llvm::dyn_cast<clang::FunctionDecl>(decl_ctx)) {
- if (ClangASTMetadata *metadata = GetMetadata(fun_decl))
+ if (std::optional<ClangASTMetadata> metadata = GetMetadata(fun_decl))
return metadata->GetObjectPtrLanguage();
}
@@ -9593,7 +9596,7 @@ TypeSystemClang::DeclContextGetAsNamespaceDecl(const CompilerDeclContext &dc) {
return nullptr;
}
-ClangASTMetadata *
+std::optional<ClangASTMetadata>
TypeSystemClang::DeclContextGetMetaData(const CompilerDeclContext &dc,
const Decl *object) {
TypeSystemClang *ast = llvm::cast<TypeSystemClang>(dc.GetTypeSystem());
@@ -9827,8 +9830,7 @@ bool TypeSystemClang::IsForcefullyCompleted(lldb::opaque_compiler_type_t type) {
if (record_type) {
const clang::RecordDecl *record_decl = record_type->getDecl();
assert(record_decl);
- ClangASTMetadata *metadata = GetMetadata(record_decl);
- if (metadata)
+ if (std::optional<ClangASTMetadata> metadata = GetMetadata(record_decl))
return metadata->IsForcefullyCompleted();
}
}
@@ -9838,11 +9840,13 @@ bool TypeSystemClang::IsForcefullyCompleted(lldb::opaque_compiler_type_t type) {
bool TypeSystemClang::SetDeclIsForcefullyCompleted(const clang::TagDecl *td) {
if (td == nullptr)
return false;
- ClangASTMetadata *metadata = GetMetadata(td);
- if (metadata == nullptr)
+ std::optional<ClangASTMetadata> metadata = GetMetadata(td);
+ if (!metadata)
return false;
m_has_forcefully_completed_types = true;
metadata->SetIsForcefullyCompleted();
+ SetMetadata(td, *metadata);
+
return true;
}
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 56a5c0a..7893527 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -29,6 +29,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
+#include "Plugins/ExpressionParser/Clang/ClangASTMetadata.h"
#include "Plugins/ExpressionParser/Clang/ClangPersistentVariables.h"
#include "lldb/Expression/ExpressionVariable.h"
#include "lldb/Symbol/CompilerType.h"
@@ -50,7 +51,6 @@ class ModuleMap;
namespace lldb_private {
-class ClangASTMetadata;
class ClangASTSource;
class Declaration;
@@ -191,11 +191,11 @@ public:
void SetMetadataAsUserID(const clang::Decl *decl, lldb::user_id_t user_id);
void SetMetadataAsUserID(const clang::Type *type, lldb::user_id_t user_id);
- void SetMetadata(const clang::Decl *object, ClangASTMetadata &meta_data);
+ void SetMetadata(const clang::Decl *object, ClangASTMetadata meta_data);
- void SetMetadata(const clang::Type *object, ClangASTMetadata &meta_data);
- ClangASTMetadata *GetMetadata(const clang::Decl *object);
- ClangASTMetadata *GetMetadata(const clang::Type *object);
+ void SetMetadata(const clang::Type *object, ClangASTMetadata meta_data);
+ std::optional<ClangASTMetadata> GetMetadata(const clang::Decl *object);
+ std::optional<ClangASTMetadata> GetMetadata(const clang::Type *object);
void SetCXXRecordDeclAccess(const clang::CXXRecordDecl *object,
clang::AccessSpecifier access);
@@ -325,13 +325,13 @@ public:
bool is_framework = false,
bool is_explicit = false);
- CompilerType CreateRecordType(clang::DeclContext *decl_ctx,
- OptionalClangModuleID owning_module,
- lldb::AccessType access_type,
- llvm::StringRef name, int kind,
- lldb::LanguageType language,
- ClangASTMetadata *metadata = nullptr,
- bool exports_symbols = false);
+ CompilerType
+ CreateRecordType(clang::DeclContext *decl_ctx,
+ OptionalClangModuleID owning_module,
+ lldb::AccessType access_type, llvm::StringRef name, int kind,
+ lldb::LanguageType language,
+ std::optional<ClangASTMetadata> metadata = std::nullopt,
+ bool exports_symbols = false);
class TemplateParameterInfos {
public:
@@ -455,11 +455,11 @@ public:
bool BaseSpecifierIsEmpty(const clang::CXXBaseSpecifier *b);
- CompilerType CreateObjCClass(llvm::StringRef name,
- clang::DeclContext *decl_ctx,
- OptionalClangModuleID owning_module,
- bool isForwardDecl, bool isInternal,
- ClangASTMetadata *metadata = nullptr);
+ CompilerType
+ CreateObjCClass(llvm::StringRef name, clang::DeclContext *decl_ctx,
+ OptionalClangModuleID owning_module, bool isForwardDecl,
+ bool isInternal,
+ std::optional<ClangASTMetadata> metadata = std::nullopt);
// Returns a mask containing bits from the TypeSystemClang::eTypeXXX
// enumerations
@@ -616,8 +616,9 @@ public:
static clang::NamespaceDecl *
DeclContextGetAsNamespaceDecl(const CompilerDeclContext &dc);
- static ClangASTMetadata *DeclContextGetMetaData(const CompilerDeclContext &dc,
- const clang::Decl *object);
+ static std::optional<ClangASTMetadata>
+ DeclContextGetMetaData(const CompilerDeclContext &dc,
+ const clang::Decl *object);
static clang::ASTContext *
DeclContextGetTypeSystemClang(const CompilerDeclContext &dc);
@@ -1004,7 +1005,7 @@ public:
const char *property_setter_name,
const char *property_getter_name,
uint32_t property_attributes,
- ClangASTMetadata *metadata);
+ ClangASTMetadata metadata);
static clang::ObjCMethodDecl *AddMethodToObjCObjectType(
const CompilerType &type,
diff --git a/lldb/source/Target/CMakeLists.txt b/lldb/source/Target/CMakeLists.txt
index 4a51310..a42c44b 100644
--- a/lldb/source/Target/CMakeLists.txt
+++ b/lldb/source/Target/CMakeLists.txt
@@ -59,6 +59,7 @@ add_lldb_library(lldbTarget
ThreadPlanCallOnFunctionExit.cpp
ThreadPlanCallUserExpression.cpp
ThreadPlanRunToAddress.cpp
+ ThreadPlanSingleThreadTimeout.cpp
ThreadPlanShouldStopHere.cpp
ThreadPlanStepInRange.cpp
ThreadPlanStepInstruction.cpp
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index 728f9aa..e3c4f2e 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -473,7 +473,8 @@ Process::Process(lldb::TargetSP target_sp, ListenerSP listener_sp,
m_memory_cache(*this), m_allocated_memory_cache(*this),
m_should_detach(false), m_next_event_action_up(), m_public_run_lock(),
m_private_run_lock(), m_currently_handling_do_on_removals(false),
- m_resume_requested(false), m_finalizing(false), m_destructing(false),
+ m_resume_requested(false), m_interrupt_tid(LLDB_INVALID_THREAD_ID),
+ m_finalizing(false), m_destructing(false),
m_clear_thread_plans_on_stop(false), m_force_next_event_delivery(false),
m_last_broadcast_state(eStateInvalid), m_destroy_in_process(false),
m_can_interpret_function_calls(false), m_run_thread_plan_lock(),
@@ -895,6 +896,7 @@ bool Process::HandleProcessStateChangedEvent(
case eStopReasonThreadExiting:
case eStopReasonInstrumentation:
case eStopReasonProcessorTrace:
+ case eStopReasonInterrupt:
if (!other_thread)
other_thread = thread;
break;
@@ -3873,7 +3875,11 @@ void Process::ControlPrivateStateThread(uint32_t signal) {
}
}
-void Process::SendAsyncInterrupt() {
+void Process::SendAsyncInterrupt(Thread *thread) {
+ if (thread != nullptr)
+ m_interrupt_tid = thread->GetProtocolID();
+ else
+ m_interrupt_tid = LLDB_INVALID_THREAD_ID;
if (PrivateStateThreadIsValid())
m_private_state_broadcaster.BroadcastEvent(Process::eBroadcastBitInterrupt,
nullptr);
@@ -4099,9 +4105,14 @@ thread_result_t Process::RunPrivateStateThread(bool is_secondary_thread) {
if (interrupt_requested) {
if (StateIsStoppedState(internal_state, true)) {
- // We requested the interrupt, so mark this as such in the stop event
- // so clients can tell an interrupted process from a natural stop
- ProcessEventData::SetInterruptedInEvent(event_sp.get(), true);
+ // Only mark interrupt event if it is not thread specific async
+ // interrupt.
+ if (m_interrupt_tid == LLDB_INVALID_THREAD_ID) {
+ // We requested the interrupt, so mark this as such in the stop
+ // event so clients can tell an interrupted process from a natural
+ // stop
+ ProcessEventData::SetInterruptedInEvent(event_sp.get(), true);
+ }
interrupt_requested = false;
} else if (log) {
LLDB_LOGF(log,
diff --git a/lldb/source/Target/StopInfo.cpp b/lldb/source/Target/StopInfo.cpp
index 95f7805..bd7032b 100644
--- a/lldb/source/Target/StopInfo.cpp
+++ b/lldb/source/Target/StopInfo.cpp
@@ -1125,6 +1125,29 @@ private:
std::optional<int> m_code;
};
+// StopInfoInterrupt
+
+class StopInfoInterrupt : public StopInfo {
+public:
+ StopInfoInterrupt(Thread &thread, int signo, const char *description)
+ : StopInfo(thread, signo) {
+ SetDescription(description);
+ }
+
+ ~StopInfoInterrupt() override = default;
+
+ StopReason GetStopReason() const override {
+ return lldb::eStopReasonInterrupt;
+ }
+
+ const char *GetDescription() override {
+ if (m_description.empty()) {
+ m_description = "async interrupt";
+ }
+ return m_description.c_str();
+ }
+};
+
// StopInfoTrace
class StopInfoTrace : public StopInfo {
@@ -1390,6 +1413,11 @@ StopInfoSP StopInfo::CreateStopReasonWithSignal(Thread &thread, int signo,
return StopInfoSP(new StopInfoUnixSignal(thread, signo, description, code));
}
+StopInfoSP StopInfo::CreateStopReasonWithInterrupt(Thread &thread, int signo,
+ const char *description) {
+ return StopInfoSP(new StopInfoInterrupt(thread, signo, description));
+}
+
StopInfoSP StopInfo::CreateStopReasonToTrace(Thread &thread) {
return StopInfoSP(new StopInfoTrace(thread));
}
diff --git a/lldb/source/Target/TargetProperties.td b/lldb/source/Target/TargetProperties.td
index 4404a45..ef53867 100644
--- a/lldb/source/Target/TargetProperties.td
+++ b/lldb/source/Target/TargetProperties.td
@@ -313,6 +313,10 @@ let Definition = "thread" in {
def MaxBacktraceDepth: Property<"max-backtrace-depth", "UInt64">,
DefaultUnsignedValue<600000>,
Desc<"Maximum number of frames to backtrace.">;
+ def SingleThreadPlanTimeout: Property<"single-thread-plan-timeout", "UInt64">,
+ Global,
+ DefaultUnsignedValue<1000>,
+ Desc<"The time in milliseconds to wait for single thread ThreadPlan to move forward before resuming all threads to resolve any potential deadlock. Specify value 0 to disable timeout.">;
}
let Definition = "language" in {
diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp
index a62074b..74d1a26 100644
--- a/lldb/source/Target/Thread.cpp
+++ b/lldb/source/Target/Thread.cpp
@@ -143,6 +143,12 @@ uint64_t ThreadProperties::GetMaxBacktraceDepth() const {
idx, g_thread_properties[idx].default_uint_value);
}
+uint64_t ThreadProperties::GetSingleThreadPlanTimeout() const {
+ const uint32_t idx = ePropertySingleThreadPlanTimeout;
+ return GetPropertyAtIndexAs<uint64_t>(
+ idx, g_thread_properties[idx].default_uint_value);
+}
+
// Thread Event Data
llvm::StringRef Thread::ThreadEventData::GetFlavorString() {
@@ -813,12 +819,17 @@ bool Thread::ShouldStop(Event *event_ptr) {
// decide whether they still need to do more work.
bool done_processing_current_plan = false;
-
if (!current_plan->PlanExplainsStop(event_ptr)) {
if (current_plan->TracerExplainsStop()) {
done_processing_current_plan = true;
should_stop = false;
} else {
+ // Leaf plan that does not explain the stop should be popped.
+ // The plan should be push itself later again before resuming to stay
+ // as leaf.
+ if (current_plan->IsLeafPlan())
+ PopPlan();
+
// If the current plan doesn't explain the stop, then find one that does
// and let it handle the situation.
ThreadPlan *plan_ptr = current_plan;
@@ -1715,6 +1726,8 @@ std::string Thread::StopReasonAsString(lldb::StopReason reason) {
return "instrumentation break";
case eStopReasonProcessorTrace:
return "processor trace";
+ case eStopReasonInterrupt:
+ return "async interrupt";
}
return "StopReason = " + std::to_string(reason);
diff --git a/lldb/source/Target/ThreadPlan.cpp b/lldb/source/Target/ThreadPlan.cpp
index 7927fc3..f05e1fa 100644
--- a/lldb/source/Target/ThreadPlan.cpp
+++ b/lldb/source/Target/ThreadPlan.cpp
@@ -174,6 +174,7 @@ bool ThreadPlan::IsUsuallyUnexplainedStopReason(lldb::StopReason reason) {
case eStopReasonFork:
case eStopReasonVFork:
case eStopReasonVForkDone:
+ case eStopReasonInterrupt:
return true;
default:
return false;
diff --git a/lldb/source/Target/ThreadPlanSingleThreadTimeout.cpp b/lldb/source/Target/ThreadPlanSingleThreadTimeout.cpp
new file mode 100644
index 0000000..40a8af8
--- /dev/null
+++ b/lldb/source/Target/ThreadPlanSingleThreadTimeout.cpp
@@ -0,0 +1,250 @@
+//===-- ThreadPlanStepOverRange.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Target/ThreadPlanSingleThreadTimeout.h"
+#include "lldb/Symbol/Block.h"
+#include "lldb/Symbol/CompileUnit.h"
+#include "lldb/Symbol/Function.h"
+#include "lldb/Symbol/LineTable.h"
+#include "lldb/Target/Process.h"
+#include "lldb/Target/RegisterContext.h"
+#include "lldb/Target/Target.h"
+#include "lldb/Target/Thread.h"
+#include "lldb/Target/ThreadPlanStepOut.h"
+#include "lldb/Target/ThreadPlanStepThrough.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
+#include "lldb/Utility/Stream.h"
+
+using namespace lldb_private;
+using namespace lldb;
+
+ThreadPlanSingleThreadTimeout::ThreadPlanSingleThreadTimeout(
+ Thread &thread, TimeoutInfoSP &info)
+ : ThreadPlan(ThreadPlan::eKindSingleThreadTimeout, "Single thread timeout",
+ thread, eVoteNo, eVoteNoOpinion),
+ m_info(info), m_state(State::WaitTimeout) {
+ // TODO: reuse m_timer_thread without recreation.
+ m_timer_thread = std::thread(TimeoutThreadFunc, this);
+ m_info->m_isAlive = true;
+ m_state = m_info->m_last_state;
+}
+
+ThreadPlanSingleThreadTimeout::~ThreadPlanSingleThreadTimeout() {
+ m_info->m_isAlive = false;
+}
+
+uint64_t ThreadPlanSingleThreadTimeout::GetRemainingTimeoutMilliSeconds() {
+ uint64_t timeout_in_ms = GetThread().GetSingleThreadPlanTimeout();
+ std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
+ std::chrono::milliseconds duration_ms =
+ std::chrono::duration_cast<std::chrono::milliseconds>(now -
+ m_timeout_start);
+ return timeout_in_ms - duration_ms.count();
+}
+
+void ThreadPlanSingleThreadTimeout::GetDescription(
+ Stream *s, lldb::DescriptionLevel level) {
+ s->Printf("Single thread timeout, state(%s), remaining %" PRIu64 " ms",
+ StateToString(m_state).c_str(), GetRemainingTimeoutMilliSeconds());
+}
+
+std::string ThreadPlanSingleThreadTimeout::StateToString(State state) {
+ switch (state) {
+ case State::WaitTimeout:
+ return "WaitTimeout";
+ case State::AsyncInterrupt:
+ return "AsyncInterrupt";
+ case State::Done:
+ return "Done";
+ }
+}
+
+void ThreadPlanSingleThreadTimeout::PushNewWithTimeout(Thread &thread,
+ TimeoutInfoSP &info) {
+ uint64_t timeout_in_ms = thread.GetSingleThreadPlanTimeout();
+ if (timeout_in_ms == 0)
+ return;
+
+ // Do not create timeout if we are not stopping other threads.
+ if (!thread.GetCurrentPlan()->StopOthers())
+ return;
+
+ auto timeout_plan = new ThreadPlanSingleThreadTimeout(thread, info);
+ ThreadPlanSP thread_plan_sp(timeout_plan);
+ auto status = thread.QueueThreadPlan(thread_plan_sp,
+ /*abort_other_plans*/ false);
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(
+ log,
+ "ThreadPlanSingleThreadTimeout pushing a brand new one with %" PRIu64
+ " ms",
+ timeout_in_ms);
+}
+
+void ThreadPlanSingleThreadTimeout::ResumeFromPrevState(Thread &thread,
+ TimeoutInfoSP &info) {
+ uint64_t timeout_in_ms = thread.GetSingleThreadPlanTimeout();
+ if (timeout_in_ms == 0)
+ return;
+
+ // There is already an instance alive.
+ if (info->m_isAlive)
+ return;
+
+ // Do not create timeout if we are not stopping other threads.
+ if (!thread.GetCurrentPlan()->StopOthers())
+ return;
+
+ auto timeout_plan = new ThreadPlanSingleThreadTimeout(thread, info);
+ ThreadPlanSP thread_plan_sp(timeout_plan);
+ auto status = thread.QueueThreadPlan(thread_plan_sp,
+ /*abort_other_plans*/ false);
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(
+ log,
+ "ThreadPlanSingleThreadTimeout reset from previous state with %" PRIu64
+ " ms",
+ timeout_in_ms);
+}
+
+bool ThreadPlanSingleThreadTimeout::WillStop() {
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(log, "ThreadPlanSingleThreadTimeout::WillStop().");
+
+ // Reset the state during stop.
+ m_info->m_last_state = State::WaitTimeout;
+ return true;
+}
+
+void ThreadPlanSingleThreadTimeout::DidPop() {
+ Log *log = GetLog(LLDBLog::Step);
+ {
+ std::lock_guard<std::mutex> lock(m_mutex);
+ LLDB_LOGF(log, "ThreadPlanSingleThreadTimeout::DidPop().");
+ // Tell timer thread to exit.
+ m_info->m_isAlive = false;
+ }
+ m_wakeup_cv.notify_one();
+ // Wait for timer thread to exit.
+ m_timer_thread.join();
+}
+
+bool ThreadPlanSingleThreadTimeout::DoPlanExplainsStop(Event *event_ptr) {
+ bool is_timeout_interrupt = IsTimeoutAsyncInterrupt(event_ptr);
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(log,
+ "ThreadPlanSingleThreadTimeout::DoPlanExplainsStop() returns %d. "
+ "%" PRIu64 " ms remaining.",
+ is_timeout_interrupt, GetRemainingTimeoutMilliSeconds());
+ return is_timeout_interrupt;
+}
+
+lldb::StateType ThreadPlanSingleThreadTimeout::GetPlanRunState() {
+ return GetPreviousPlan()->GetPlanRunState();
+}
+
+void ThreadPlanSingleThreadTimeout::TimeoutThreadFunc(
+ ThreadPlanSingleThreadTimeout *self) {
+ std::unique_lock<std::mutex> lock(self->m_mutex);
+ uint64_t timeout_in_ms = self->GetThread().GetSingleThreadPlanTimeout();
+ // The thread should wakeup either when timeout or
+ // ThreadPlanSingleThreadTimeout has been popped (not alive).
+ Log *log = GetLog(LLDBLog::Step);
+ self->m_timeout_start = std::chrono::steady_clock::now();
+ LLDB_LOGF(
+ log,
+ "ThreadPlanSingleThreadTimeout::TimeoutThreadFunc(), wait for %" PRIu64
+ " ms",
+ timeout_in_ms);
+ self->m_wakeup_cv.wait_for(lock, std::chrono::milliseconds(timeout_in_ms),
+ [self] { return !self->m_info->m_isAlive; });
+ LLDB_LOGF(log,
+ "ThreadPlanSingleThreadTimeout::TimeoutThreadFunc() wake up with "
+ "m_isAlive(%d).",
+ self->m_info->m_isAlive);
+ if (!self->m_info->m_isAlive)
+ return;
+
+ self->HandleTimeout();
+}
+
+bool ThreadPlanSingleThreadTimeout::MischiefManaged() {
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(log, "ThreadPlanSingleThreadTimeout::MischiefManaged() called.");
+ // Need to reset timer on each internal stop/execution progress.
+ return true;
+}
+
+bool ThreadPlanSingleThreadTimeout::ShouldStop(Event *event_ptr) {
+ return HandleEvent(event_ptr);
+}
+
+void ThreadPlanSingleThreadTimeout::SetStopOthers(bool new_value) {
+ // Note: this assumes that the SingleThreadTimeout plan is always going to be
+ // pushed on behalf of the plan directly above it.
+ GetPreviousPlan()->SetStopOthers(new_value);
+}
+
+bool ThreadPlanSingleThreadTimeout::StopOthers() {
+ if (m_state == State::Done)
+ return false;
+ else
+ return GetPreviousPlan()->StopOthers();
+}
+
+bool ThreadPlanSingleThreadTimeout::IsTimeoutAsyncInterrupt(Event *event_ptr) {
+ lldb::StateType stop_state =
+ Process::ProcessEventData::GetStateFromEvent(event_ptr);
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(log,
+ "ThreadPlanSingleThreadTimeout::IsTimeoutAsyncInterrupt(): got "
+ "event: %s.",
+ StateAsCString(stop_state));
+
+ lldb::StopInfoSP stop_info = GetThread().GetStopInfo();
+ return (m_state == State::AsyncInterrupt &&
+ stop_state == lldb::eStateStopped && stop_info &&
+ stop_info->GetStopReason() == lldb::eStopReasonInterrupt);
+}
+
+bool ThreadPlanSingleThreadTimeout::HandleEvent(Event *event_ptr) {
+ if (IsTimeoutAsyncInterrupt(event_ptr)) {
+ Log *log = GetLog(LLDBLog::Step);
+ if (Process::ProcessEventData::GetRestartedFromEvent(event_ptr)) {
+ // If we were restarted, we just need to go back up to fetch
+ // another event.
+ LLDB_LOGF(log,
+ "ThreadPlanSingleThreadTimeout::HandleEvent(): Got a stop and "
+ "restart, so we'll continue waiting.");
+
+ } else {
+ LLDB_LOGF(
+ log,
+ "ThreadPlanSingleThreadTimeout::HandleEvent(): Got async interrupt "
+ ", so we will resume all threads.");
+ GetThread().GetCurrentPlan()->SetStopOthers(false);
+ GetPreviousPlan()->SetStopOthers(false);
+ m_state = State::Done;
+ }
+ }
+ // Should not report stop.
+ return false;
+}
+
+void ThreadPlanSingleThreadTimeout::HandleTimeout() {
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(
+ log,
+ "ThreadPlanSingleThreadTimeout::HandleTimeout() send async interrupt.");
+ m_state = State::AsyncInterrupt;
+
+ // Private state thread will only send async interrupt
+ // in running state so no need to check state here.
+ m_process.SendAsyncInterrupt(&GetThread());
+}
diff --git a/lldb/source/Target/ThreadPlanStepInRange.cpp b/lldb/source/Target/ThreadPlanStepInRange.cpp
index 17f2100..567dcc2 100644
--- a/lldb/source/Target/ThreadPlanStepInRange.cpp
+++ b/lldb/source/Target/ThreadPlanStepInRange.cpp
@@ -134,6 +134,7 @@ bool ThreadPlanStepInRange::ShouldStop(Event *event_ptr) {
GetTarget().GetArchitecture().GetAddressByteSize());
LLDB_LOGF(log, "ThreadPlanStepInRange reached %s.", s.GetData());
}
+ ClearNextBranchBreakpointExplainedStop();
if (IsPlanComplete())
return true;
diff --git a/lldb/source/Target/ThreadPlanStepOverRange.cpp b/lldb/source/Target/ThreadPlanStepOverRange.cpp
index abe4d34..934f23b 100644
--- a/lldb/source/Target/ThreadPlanStepOverRange.cpp
+++ b/lldb/source/Target/ThreadPlanStepOverRange.cpp
@@ -15,6 +15,7 @@
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
+#include "lldb/Target/ThreadPlanSingleThreadTimeout.h"
#include "lldb/Target/ThreadPlanStepOut.h"
#include "lldb/Target/ThreadPlanStepThrough.h"
#include "lldb/Utility/LLDBLog.h"
@@ -36,7 +37,8 @@ ThreadPlanStepOverRange::ThreadPlanStepOverRange(
: ThreadPlanStepRange(ThreadPlan::eKindStepOverRange,
"Step range stepping over", thread, range,
addr_context, stop_others),
- ThreadPlanShouldStopHere(this), m_first_resume(true) {
+ ThreadPlanShouldStopHere(this), TimeoutResumeAll(thread),
+ m_first_resume(true), m_run_mode(stop_others) {
SetFlagsToDefault();
SetupAvoidNoDebug(step_out_avoids_code_without_debug_info);
}
@@ -124,6 +126,11 @@ bool ThreadPlanStepOverRange::IsEquivalentContext(
return m_addr_context.symbol && m_addr_context.symbol == context.symbol;
}
+void ThreadPlanStepOverRange::SetStopOthers(bool stop_others) {
+ if (!stop_others)
+ m_stop_others = RunMode::eAllThreads;
+}
+
bool ThreadPlanStepOverRange::ShouldStop(Event *event_ptr) {
Log *log = GetLog(LLDBLog::Step);
Thread &thread = GetThread();
@@ -134,6 +141,7 @@ bool ThreadPlanStepOverRange::ShouldStop(Event *event_ptr) {
GetTarget().GetArchitecture().GetAddressByteSize());
LLDB_LOGF(log, "ThreadPlanStepOverRange reached %s.", s.GetData());
}
+ ClearNextBranchBreakpointExplainedStop();
// If we're out of the range but in the same frame or in our caller's frame
// then we should stop. When stepping out we only stop others if we are
@@ -141,6 +149,8 @@ bool ThreadPlanStepOverRange::ShouldStop(Event *event_ptr) {
bool stop_others = (m_stop_others == lldb::eOnlyThisThread);
ThreadPlanSP new_plan_sp;
FrameComparison frame_order = CompareCurrentFrameToStartFrame();
+ LLDB_LOGF(log, "ThreadPlanStepOverRange compare frame result: %d.",
+ frame_order);
if (frame_order == eFrameCompareOlder) {
// If we're in an older frame then we should stop.
@@ -337,6 +347,12 @@ bool ThreadPlanStepOverRange::ShouldStop(Event *event_ptr) {
return false;
}
+void ThreadPlanStepOverRange::DidPush() {
+ ThreadPlanStepRange::DidPush();
+ if (m_run_mode == lldb::eOnlyThisThread && IsControllingPlan())
+ PushNewTimeout();
+}
+
bool ThreadPlanStepOverRange::DoPlanExplainsStop(Event *event_ptr) {
// For crashes, breakpoint hits, signals, etc, let the base plan (or some
// plan above us) handle the stop. That way the user can see the stop, step
@@ -414,6 +430,7 @@ bool ThreadPlanStepOverRange::DoWillResume(lldb::StateType resume_state,
}
}
}
-
+ if (m_run_mode == lldb::eOnlyThisThread && IsControllingPlan())
+ ResumeWithTimeout();
return true;
}
diff --git a/lldb/source/Target/ThreadPlanStepRange.cpp b/lldb/source/Target/ThreadPlanStepRange.cpp
index 801856b..3c82505 100644
--- a/lldb/source/Target/ThreadPlanStepRange.cpp
+++ b/lldb/source/Target/ThreadPlanStepRange.cpp
@@ -293,6 +293,20 @@ InstructionList *ThreadPlanStepRange::GetInstructionsForAddress(
return nullptr;
}
+bool ThreadPlanStepRange::IsNextBranchBreakpointStop(StopInfoSP stop_info_sp) {
+ if (!m_next_branch_bp_sp)
+ return false;
+
+ break_id_t bp_site_id = stop_info_sp->GetValue();
+ BreakpointSiteSP bp_site_sp =
+ m_process.GetBreakpointSiteList().FindByID(bp_site_id);
+ if (!bp_site_sp)
+ return false;
+ else if (!bp_site_sp->IsBreakpointAtThisSite(m_next_branch_bp_sp->GetID()))
+ return false;
+ return true;
+}
+
void ThreadPlanStepRange::ClearNextBranchBreakpoint() {
if (m_next_branch_bp_sp) {
Log *log = GetLog(LLDBLog::Step);
@@ -305,6 +319,11 @@ void ThreadPlanStepRange::ClearNextBranchBreakpoint() {
}
}
+void ThreadPlanStepRange::ClearNextBranchBreakpointExplainedStop() {
+ if (IsNextBranchBreakpointStop(GetPrivateStopInfo()))
+ ClearNextBranchBreakpoint();
+}
+
bool ThreadPlanStepRange::SetNextBranchBreakpoint() {
if (m_next_branch_bp_sp)
return true;
@@ -347,7 +366,9 @@ bool ThreadPlanStepRange::SetNextBranchBreakpoint() {
run_to_address =
instructions->GetInstructionAtIndex(branch_index)->GetAddress();
}
-
+ if (branch_index == pc_index)
+ LLDB_LOGF(log, "ThreadPlanStepRange::SetNextBranchBreakpoint - skipping "
+ "because current is branch instruction");
if (run_to_address.IsValid()) {
const bool is_internal = true;
m_next_branch_bp_sp =
@@ -381,15 +402,16 @@ bool ThreadPlanStepRange::SetNextBranchBreakpoint() {
return true;
} else
return false;
- }
+ } else
+ LLDB_LOGF(log, "ThreadPlanStepRange::SetNextBranchBreakpoint - skipping "
+ "invalid run_to_address");
}
return false;
}
bool ThreadPlanStepRange::NextRangeBreakpointExplainsStop(
lldb::StopInfoSP stop_info_sp) {
- Log *log = GetLog(LLDBLog::Step);
- if (!m_next_branch_bp_sp)
+ if (!IsNextBranchBreakpointStop(stop_info_sp))
return false;
break_id_t bp_site_id = stop_info_sp->GetValue();
@@ -397,30 +419,27 @@ bool ThreadPlanStepRange::NextRangeBreakpointExplainsStop(
m_process.GetBreakpointSiteList().FindByID(bp_site_id);
if (!bp_site_sp)
return false;
- else if (!bp_site_sp->IsBreakpointAtThisSite(m_next_branch_bp_sp->GetID()))
- return false;
- else {
- // If we've hit the next branch breakpoint, then clear it.
- size_t num_constituents = bp_site_sp->GetNumberOfConstituents();
- bool explains_stop = true;
- // If all the constituents are internal, then we are probably just stepping
- // over this range from multiple threads, or multiple frames, so we want to
- // continue. If one is not internal, then we should not explain the stop,
- // and let the user breakpoint handle the stop.
- for (size_t i = 0; i < num_constituents; i++) {
- if (!bp_site_sp->GetConstituentAtIndex(i)->GetBreakpoint().IsInternal()) {
- explains_stop = false;
- break;
- }
+
+ // If we've hit the next branch breakpoint, then clear it.
+ size_t num_constituents = bp_site_sp->GetNumberOfConstituents();
+ bool explains_stop = true;
+ // If all the constituents are internal, then we are probably just stepping
+ // over this range from multiple threads, or multiple frames, so we want to
+ // continue. If one is not internal, then we should not explain the stop,
+ // and let the user breakpoint handle the stop.
+ for (size_t i = 0; i < num_constituents; i++) {
+ if (!bp_site_sp->GetConstituentAtIndex(i)->GetBreakpoint().IsInternal()) {
+ explains_stop = false;
+ break;
}
- LLDB_LOGF(log,
- "ThreadPlanStepRange::NextRangeBreakpointExplainsStop - Hit "
- "next range breakpoint which has %" PRIu64
- " constituents - explains stop: %u.",
- (uint64_t)num_constituents, explains_stop);
- ClearNextBranchBreakpoint();
- return explains_stop;
}
+ Log *log = GetLog(LLDBLog::Step);
+ LLDB_LOGF(log,
+ "ThreadPlanStepRange::NextRangeBreakpointExplainsStop - Hit "
+ "next range breakpoint which has %" PRIu64
+ " constituents - explains stop: %u.",
+ (uint64_t)num_constituents, explains_stop);
+ return explains_stop;
}
bool ThreadPlanStepRange::WillStop() { return true; }
diff --git a/lldb/test/API/debuginfod/Normal/Makefile b/lldb/test/API/debuginfod/Normal/Makefile
new file mode 100644
index 0000000..54bd7ad
--- /dev/null
+++ b/lldb/test/API/debuginfod/Normal/Makefile
@@ -0,0 +1,19 @@
+C_SOURCES := main.c
+
+# For normal (non DWP) Debuginfod tests, we need:
+
+# * The full binary: a.out.unstripped
+# Produced by Makefile.rules with SAVE_FULL_DEBUG_BINARY set to YES and
+# SPLIT_DEBUG_SYMBOLS set to YES
+
+# * The stripped binary (a.out)
+# Produced by Makefile.rules with SPLIT_DEBUG_SYMBOLS set to YES
+
+# * The 'only-keep-debug' binary (a.out.debug)
+# Produced below
+
+SPLIT_DEBUG_SYMBOLS := YES
+SAVE_FULL_DEBUG_BINARY := YES
+GEN_GNU_BUILD_ID := YES
+
+include Makefile.rules
diff --git a/lldb/test/API/debuginfod/Normal/TestDebuginfod.py b/lldb/test/API/debuginfod/Normal/TestDebuginfod.py
new file mode 100644
index 0000000..1860c56
--- /dev/null
+++ b/lldb/test/API/debuginfod/Normal/TestDebuginfod.py
@@ -0,0 +1,186 @@
+import os
+import shutil
+import tempfile
+
+import lldb
+from lldbsuite.test.decorators import *
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+
+"""
+Test support for the DebugInfoD network symbol acquisition protocol.
+This one is for simple / no split-dwarf scenarios.
+
+For no-split-dwarf scenarios, there are 2 variations:
+1 - A stripped binary with it's corresponding unstripped binary:
+2 - A stripped binary with a corresponding --only-keep-debug symbols file
+"""
+
+
+class DebugInfodTests(TestBase):
+ # No need to try every flavor of debug inf.
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipUnlessPlatform(["linux", "freebsd"])
+ def test_normal_no_symbols(self):
+ """
+ Validate behavior with no symbols or symbol locator.
+ ('baseline negative' behavior)
+ """
+ test_root = self.config_test(["a.out"])
+ self.try_breakpoint(False)
+
+ @skipUnlessPlatform(["linux", "freebsd"])
+ def test_normal_default(self):
+ """
+ Validate behavior with symbols, but no symbol locator.
+ ('baseline positive' behavior)
+ """
+ test_root = self.config_test(["a.out", "a.out.debug"])
+ self.try_breakpoint(True)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux", "freebsd"])
+ def test_debuginfod_symbols(self):
+ """
+ Test behavior with the full binary available from Debuginfod as
+ 'debuginfo' from the plug-in.
+ """
+ test_root = self.config_test(["a.out"], "a.out.unstripped")
+ self.try_breakpoint(True)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux", "freebsd"])
+ def test_debuginfod_executable(self):
+ """
+ Test behavior with the full binary available from Debuginfod as
+ 'executable' from the plug-in.
+ """
+ test_root = self.config_test(["a.out"], None, "a.out.unstripped")
+ self.try_breakpoint(True)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux", "freebsd"])
+ def test_debuginfod_okd_symbols(self):
+ """
+ Test behavior with the 'only-keep-debug' symbols available from Debuginfod.
+ """
+ test_root = self.config_test(["a.out"], "a.out.debug")
+ self.try_breakpoint(True)
+
+ def try_breakpoint(self, should_have_loc):
+ """
+ This function creates a target from self.aout, sets a function-name
+ breakpoint, and checks to see if we have a file/line location,
+ as a way to validate that the symbols have been loaded.
+ should_have_loc specifies if we're testing that symbols have or
+ haven't been loaded.
+ """
+ target = self.dbg.CreateTarget(self.aout)
+ self.assertTrue(target and target.IsValid(), "Target is valid")
+
+ bp = target.BreakpointCreateByName("func")
+ self.assertTrue(bp and bp.IsValid(), "Breakpoint is valid")
+ self.assertEqual(bp.GetNumLocations(), 1)
+
+ loc = bp.GetLocationAtIndex(0)
+ self.assertTrue(loc and loc.IsValid(), "Location is valid")
+ addr = loc.GetAddress()
+ self.assertTrue(addr and addr.IsValid(), "Loc address is valid")
+ line_entry = addr.GetLineEntry()
+ self.assertEqual(
+ should_have_loc,
+ line_entry != None and line_entry.IsValid(),
+ "Loc line entry is valid",
+ )
+ if should_have_loc:
+ self.assertEqual(line_entry.GetLine(), 4)
+ self.assertEqual(
+ line_entry.GetFileSpec().GetFilename(),
+ self.main_source_file.GetFilename(),
+ )
+ self.dbg.DeleteTarget(target)
+ shutil.rmtree(self.tmp_dir)
+
+ def config_test(self, local_files, debuginfo=None, executable=None):
+ """
+ Set up a test with local_files[] copied to a different location
+ so that we control which files are, or are not, found in the file system.
+ Also, create a stand-alone file-system 'hosted' debuginfod server with the
+ provided debuginfo and executable files (if they exist)
+
+ Make the filesystem look like:
+
+ /tmp/<tmpdir>/test/[local_files]
+
+ /tmp/<tmpdir>/cache (for lldb to use as a temp cache)
+
+ /tmp/<tmpdir>/buildid/<uuid>/executable -> <executable>
+ /tmp/<tmpdir>/buildid/<uuid>/debuginfo -> <debuginfo>
+ Returns the /tmp/<tmpdir> path
+ """
+
+ self.build()
+
+ uuid = self.getUUID("a.out")
+ if not uuid:
+ self.fail("Could not get UUID for a.out")
+ return
+ self.main_source_file = lldb.SBFileSpec("main.c")
+ self.tmp_dir = tempfile.mkdtemp()
+ test_dir = os.path.join(self.tmp_dir, "test")
+ os.makedirs(test_dir)
+
+ self.aout = ""
+ # Copy the files used by the test:
+ for f in local_files:
+ shutil.copy(self.getBuildArtifact(f), test_dir)
+ # The first item is the binary to be used for the test
+ if self.aout == "":
+ self.aout = os.path.join(test_dir, f)
+
+ use_debuginfod = debuginfo != None or executable != None
+
+ # Populated the 'file://... mocked' Debuginfod server:
+ if use_debuginfod:
+ os.makedirs(os.path.join(self.tmp_dir, "cache"))
+ uuid_dir = os.path.join(self.tmp_dir, "buildid", uuid)
+ os.makedirs(uuid_dir)
+ if debuginfo:
+ shutil.copy(
+ self.getBuildArtifact(debuginfo),
+ os.path.join(uuid_dir, "debuginfo"),
+ )
+ if executable:
+ shutil.copy(
+ self.getBuildArtifact(executable),
+ os.path.join(uuid_dir, "executable"),
+ )
+
+ # Configure LLDB for the test:
+ self.runCmd(
+ "settings set symbols.enable-external-lookup %s"
+ % str(use_debuginfod).lower()
+ )
+ self.runCmd("settings clear plugin.symbol-locator.debuginfod.server-urls")
+ if use_debuginfod:
+ self.runCmd(
+ "settings set plugin.symbol-locator.debuginfod.cache-path %s/cache"
+ % self.tmp_dir
+ )
+ self.runCmd(
+ "settings insert-before plugin.symbol-locator.debuginfod.server-urls 0 file://%s"
+ % self.tmp_dir
+ )
+
+ def getUUID(self, filename):
+ try:
+ spec = lldb.SBModuleSpec()
+ spec.SetFileSpec(lldb.SBFileSpec(self.getBuildArtifact(filename)))
+ module = lldb.SBModule(spec)
+ uuid = module.GetUUIDString().replace("-", "").lower()
+ # Don't want lldb's fake 32 bit CRC's for this one
+ return uuid if len(uuid) > 8 else None
+ except:
+ return None
diff --git a/lldb/test/API/debuginfod/Normal/main.c b/lldb/test/API/debuginfod/Normal/main.c
new file mode 100644
index 0000000..4c71846
--- /dev/null
+++ b/lldb/test/API/debuginfod/Normal/main.c
@@ -0,0 +1,7 @@
+// This is a dump little pair of test files
+
+int func(int argc, const char *argv[]) {
+ return (argc + 1) * (argv[argc][0] + 2);
+}
+
+int main(int argc, const char *argv[]) { return func(0, argv); }
diff --git a/lldb/test/API/debuginfod/SplitDWARF/Makefile b/lldb/test/API/debuginfod/SplitDWARF/Makefile
new file mode 100644
index 0000000..3ab9a96
--- /dev/null
+++ b/lldb/test/API/debuginfod/SplitDWARF/Makefile
@@ -0,0 +1,23 @@
+C_SOURCES := main.c
+
+# For split-dwarf Debuginfod tests, we need:
+
+# * A .DWP file (a.out.dwp)
+# Produced by Makefile.rules with MAKE_DWP set to YES
+
+# * The "full" binary (missing things that live in .dwo's) (a.out.unstripped)
+# Produced by Makefile.rules with SAVE_FULL_DEBUG_BINARY set to YES and
+# SPLIT_DEBUG_SYMBOLS set to YES
+
+# * The stripped binary (a.out)
+# Produced by Makefile.rules
+
+# * The 'only-keep-debug' binary (a.out.debug)
+# Produced below
+
+MAKE_DWP := YES
+SPLIT_DEBUG_SYMBOLS := YES
+SAVE_FULL_DEBUG_BINARY := YES
+GEN_GNU_BUILD_ID := YES
+
+include Makefile.rules
diff --git a/lldb/test/API/debuginfod/SplitDWARF/TestDebuginfodDWP.py b/lldb/test/API/debuginfod/SplitDWARF/TestDebuginfodDWP.py
new file mode 100644
index 0000000..437c83a
--- /dev/null
+++ b/lldb/test/API/debuginfod/SplitDWARF/TestDebuginfodDWP.py
@@ -0,0 +1,196 @@
+"""
+Test support for the DebugInfoD network symbol acquisition protocol.
+"""
+import os
+import shutil
+import tempfile
+
+import lldb
+from lldbsuite.test.decorators import *
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+
+"""
+Test support for the DebugInfoD network symbol acquisition protocol.
+This file is for split-dwarf (dwp) scenarios.
+
+1 - A split binary target with its corresponding DWP file
+2 - A stripped, split binary target with an unstripped binary and a DWP file
+3 - A stripped, split binary target with an --only-keep-debug symbols file and a DWP file
+"""
+
+
+class DebugInfodDWPTests(TestBase):
+    # No need to try every flavor of debug info.
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_normal_stripped(self):
+ """
+ Validate behavior with a stripped binary, no symbols or symbol locator.
+ """
+ self.config_test(["a.out"])
+ self.try_breakpoint(False)
+
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_normal_stripped_split_with_dwp(self):
+ """
+ Validate behavior with symbols, but no symbol locator.
+ """
+ self.config_test(["a.out", "a.out.debug", "a.out.dwp"])
+ self.try_breakpoint(True)
+
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_normal_stripped_only_dwp(self):
+ """
+ Validate behavior *with* dwp symbols only, but missing other symbols,
+ but no symbol locator. This shouldn't work: without the other symbols
+ DWO's appear mostly useless.
+ """
+ self.config_test(["a.out", "a.out.dwp"])
+ self.try_breakpoint(False)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_debuginfod_dwp_from_service(self):
+ """
+ Test behavior with the unstripped binary, and DWP from the service.
+ """
+ self.config_test(["a.out.debug"], "a.out.dwp")
+ self.try_breakpoint(True)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_debuginfod_both_symfiles_from_service(self):
+ """
+ Test behavior with a stripped binary, with the unstripped binary and
+ dwp symbols from Debuginfod.
+ """
+ self.config_test(["a.out"], "a.out.dwp", "a.out.unstripped")
+ self.try_breakpoint(True)
+
+ @skipIfCurlSupportMissing
+ @skipUnlessPlatform(["linux_freebsd_but_old_dwp_tools_on_build_bots_are_broken"])
+ def test_debuginfod_both_okd_symfiles_from_service(self):
+ """
+ Test behavior with both the only-keep-debug symbols and the dwp symbols
+ from Debuginfod.
+ """
+ self.config_test(["a.out"], "a.out.dwp", "a.out.debug")
+ self.try_breakpoint(True)
+
+ def try_breakpoint(self, should_have_loc):
+ """
+ This function creates a target from self.aout, sets a function-name
+ breakpoint, and checks to see if we have a file/line location,
+ as a way to validate that the symbols have been loaded.
+ should_have_loc specifies if we're testing that symbols have or
+ haven't been loaded.
+ """
+ target = self.dbg.CreateTarget(self.aout)
+ self.assertTrue(target and target.IsValid(), "Target is valid")
+
+ bp = target.BreakpointCreateByName("func")
+ self.assertTrue(bp and bp.IsValid(), "Breakpoint is valid")
+ self.assertEqual(bp.GetNumLocations(), 1)
+
+ loc = bp.GetLocationAtIndex(0)
+ self.assertTrue(loc and loc.IsValid(), "Location is valid")
+ addr = loc.GetAddress()
+ self.assertTrue(addr and addr.IsValid(), "Loc address is valid")
+ line_entry = addr.GetLineEntry()
+ self.assertEqual(
+ should_have_loc,
+ line_entry != None and line_entry.IsValid(),
+ "Loc line entry is valid",
+ )
+ if should_have_loc:
+ self.assertEqual(line_entry.GetLine(), 4)
+ self.assertEqual(
+ line_entry.GetFileSpec().GetFilename(),
+ self.main_source_file.GetFilename(),
+ )
+ self.dbg.DeleteTarget(target)
+ shutil.rmtree(self.tmp_dir)
+
+ def config_test(self, local_files, debuginfo=None, executable=None):
+ """
+ Set up a test with local_files[] copied to a different location
+ so that we control which files are, or are not, found in the file system.
+ Also, create a stand-alone file-system 'hosted' debuginfod server with the
+ provided debuginfo and executable files (if they exist)
+
+ Make the filesystem look like:
+
+ /tmp/<tmpdir>/test/[local_files]
+
+ /tmp/<tmpdir>/cache (for lldb to use as a temp cache)
+
+ /tmp/<tmpdir>/buildid/<uuid>/executable -> <executable>
+ /tmp/<tmpdir>/buildid/<uuid>/debuginfo -> <debuginfo>
+ Returns the /tmp/<tmpdir> path
+ """
+
+ self.build()
+
+ uuid = self.getUUID("a.out")
+ if not uuid:
+ self.fail("Could not get UUID for a.out")
+ return
+ self.main_source_file = lldb.SBFileSpec("main.c")
+ self.tmp_dir = tempfile.mkdtemp()
+ self.test_dir = os.path.join(self.tmp_dir, "test")
+ os.makedirs(self.test_dir)
+
+ self.aout = ""
+ # Copy the files used by the test:
+ for f in local_files:
+ shutil.copy(self.getBuildArtifact(f), self.test_dir)
+ if self.aout == "":
+ self.aout = os.path.join(self.test_dir, f)
+
+ use_debuginfod = debuginfo != None or executable != None
+
+        # Populate the 'file://... mocked' Debuginfod server:
+ if use_debuginfod:
+ os.makedirs(os.path.join(self.tmp_dir, "cache"))
+ uuid_dir = os.path.join(self.tmp_dir, "buildid", uuid)
+ os.makedirs(uuid_dir)
+ if debuginfo:
+ shutil.copy(
+ self.getBuildArtifact(debuginfo),
+ os.path.join(uuid_dir, "debuginfo"),
+ )
+ if executable:
+ shutil.copy(
+ self.getBuildArtifact(executable),
+ os.path.join(uuid_dir, "executable"),
+ )
+ os.remove(self.getBuildArtifact("main.dwo"))
+ # Configure LLDB for the test:
+ self.runCmd(
+ "settings set symbols.enable-external-lookup %s"
+ % str(use_debuginfod).lower()
+ )
+ self.runCmd("settings clear plugin.symbol-locator.debuginfod.server-urls")
+ if use_debuginfod:
+ self.runCmd(
+ "settings set plugin.symbol-locator.debuginfod.cache-path %s/cache"
+ % self.tmp_dir
+ )
+ self.runCmd(
+ "settings insert-before plugin.symbol-locator.debuginfod.server-urls 0 file://%s"
+ % self.tmp_dir
+ )
+
+ def getUUID(self, filename):
+ try:
+ spec = lldb.SBModuleSpec()
+ spec.SetFileSpec(lldb.SBFileSpec(self.getBuildArtifact(filename)))
+ module = lldb.SBModule(spec)
+ uuid = module.GetUUIDString().replace("-", "").lower()
+            # Don't want lldb's fake 32-bit CRCs for this one
+ return uuid if len(uuid) > 8 else None
+ except:
+ return None
diff --git a/lldb/test/API/debuginfod/SplitDWARF/main.c b/lldb/test/API/debuginfod/SplitDWARF/main.c
new file mode 100644
index 0000000..4c71846
--- /dev/null
+++ b/lldb/test/API/debuginfod/SplitDWARF/main.c
@@ -0,0 +1,7 @@
+// This is a dumb little pair of test files
+
+int func(int argc, const char *argv[]) {
+ return (argc + 1) * (argv[argc][0] + 2);
+}
+
+int main(int argc, const char *argv[]) { return func(0, argv); }
diff --git a/lldb/test/API/functionalities/single-thread-step/Makefile b/lldb/test/API/functionalities/single-thread-step/Makefile
new file mode 100644
index 0000000..de4ec12
--- /dev/null
+++ b/lldb/test/API/functionalities/single-thread-step/Makefile
@@ -0,0 +1,4 @@
+ENABLE_THREADS := YES
+CXX_SOURCES := main.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/functionalities/single-thread-step/TestSingleThreadStepTimeout.py b/lldb/test/API/functionalities/single-thread-step/TestSingleThreadStepTimeout.py
new file mode 100644
index 0000000..214a2fb
--- /dev/null
+++ b/lldb/test/API/functionalities/single-thread-step/TestSingleThreadStepTimeout.py
@@ -0,0 +1,254 @@
+"""
+Test that single thread step over deadlock issue can be resolved
+after timeout.
+"""
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class SingleThreadStepTimeoutTestCase(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ def setUp(self):
+ TestBase.setUp(self)
+ self.main_source = "main.cpp"
+ self.build()
+
+ def verify_hit_correct_line(self, pattern):
+ target_line = line_number(self.main_source, pattern)
+ self.assertNotEqual(target_line, 0, "Could not find source pattern " + pattern)
+ cur_line = self.thread.frames[0].GetLineEntry().GetLine()
+ self.assertEqual(
+ cur_line,
+ target_line,
+ "Stepped to line %d instead of expected %d with pattern '%s'."
+ % (cur_line, target_line, pattern),
+ )
+
+ def step_over_deadlock_helper(self):
+ (target, _, self.thread, _) = lldbutil.run_to_source_breakpoint(
+ self, "// Set breakpoint1 here", lldb.SBFileSpec(self.main_source)
+ )
+
+ signal_main_thread_value = target.FindFirstGlobalVariable("signal_main_thread")
+ self.assertTrue(signal_main_thread_value.IsValid())
+
+ # Change signal_main_thread global variable to 1 so that worker thread loop can
+ # terminate and move forward to signal main thread
+ signal_main_thread_value.SetValueFromCString("1")
+
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ self.verify_hit_correct_line("// Finish step-over from breakpoint1")
+
+ @skipIfWindows
+ def test_step_over_deadlock_small_timeout_fast_stepping(self):
+ """Test single thread step over deadlock on other threads can be resolved after timeout with small timeout and fast stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 10"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping true")
+ self.step_over_deadlock_helper()
+
+ @skipIfWindows
+ def test_step_over_deadlock_small_timeout_slow_stepping(self):
+ """Test single thread step over deadlock on other threads can be resolved after timeout with small timeout and slow stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 10"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping false")
+ self.step_over_deadlock_helper()
+
+ @skipIfWindows
+ def test_step_over_deadlock_large_timeout_fast_stepping(self):
+ """Test single thread step over deadlock on other threads can be resolved after timeout with large timeout and fast stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping true")
+ self.step_over_deadlock_helper()
+
+ @skipIfWindows
+ def test_step_over_deadlock_large_timeout_slow_stepping(self):
+ """Test single thread step over deadlock on other threads can be resolved after timeout with large timeout and slow stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping false")
+ self.step_over_deadlock_helper()
+
+ def step_over_multi_calls_helper(self):
+ (target, _, self.thread, _) = lldbutil.run_to_source_breakpoint(
+ self, "// Set breakpoint2 here", lldb.SBFileSpec(self.main_source)
+ )
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ self.verify_hit_correct_line("// Finish step-over from breakpoint2")
+
+ @skipIfWindows
+ def test_step_over_multi_calls_small_timeout_fast_stepping(self):
+ """Test step over source line with multiple call instructions works fine with small timeout and fast stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 10"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping true")
+ self.step_over_multi_calls_helper()
+
+ @skipIfWindows
+ def test_step_over_multi_calls_small_timeout_slow_stepping(self):
+ """Test step over source line with multiple call instructions works fine with small timeout and slow stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 10"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping false")
+ self.step_over_multi_calls_helper()
+
+ @skipIfWindows
+ def test_step_over_multi_calls_large_timeout_fast_stepping(self):
+ """Test step over source line with multiple call instructions works fine with large timeout and fast stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping true")
+ self.step_over_multi_calls_helper()
+
+ @skipIfWindows
+ def test_step_over_multi_calls_large_timeout_slow_stepping(self):
+ """Test step over source line with multiple call instructions works fine with large timeout and slow stepping."""
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ self.dbg.HandleCommand("settings set target.use-fast-stepping false")
+ self.step_over_multi_calls_helper()
+
+ @skipIfWindows
+ def test_step_over_deadlock_with_inner_breakpoint_continue(self):
+ """Test step over deadlock function with inner breakpoint will trigger the breakpoint
+ and later continue will finish the stepping.
+ """
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ (target, process, self.thread, _) = lldbutil.run_to_source_breakpoint(
+ self, "// Set breakpoint1 here", lldb.SBFileSpec(self.main_source)
+ )
+
+ signal_main_thread_value = target.FindFirstGlobalVariable("signal_main_thread")
+ self.assertTrue(signal_main_thread_value.IsValid())
+
+ # Change signal_main_thread global variable to 1 so that worker thread loop can
+ # terminate and move forward to signal main thread
+ signal_main_thread_value.SetValueFromCString("1")
+
+ # Set breakpoint on inner function call
+ inner_breakpoint = target.BreakpointCreateByLocation(
+ lldb.SBFileSpec(self.main_source),
+ line_number("main.cpp", "// Set interrupt breakpoint here"),
+ 0,
+ 0,
+ lldb.SBFileSpecList(),
+ False,
+ )
+
+ # Step over will hit the inner breakpoint and stop
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ self.assertStopReason(self.thread.GetStopReason(), lldb.eStopReasonBreakpoint)
+ thread1 = lldbutil.get_one_thread_stopped_at_breakpoint(
+ process, inner_breakpoint
+ )
+ self.assertTrue(
+ thread1.IsValid(),
+ "We are indeed stopped at inner breakpoint inside deadlock_func",
+ )
+
+ # Continue the process should complete the step-over
+ process.Continue()
+ self.assertState(process.GetState(), lldb.eStateStopped)
+ self.assertStopReason(self.thread.GetStopReason(), lldb.eStopReasonPlanComplete)
+
+ self.verify_hit_correct_line("// Finish step-over from breakpoint1")
+
+ @skipIfWindows
+ def test_step_over_deadlock_with_inner_breakpoint_step(self):
+ """Test step over deadlock function with inner breakpoint will trigger the breakpoint
+ and later step still works
+ """
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000"
+ )
+ (target, process, self.thread, _) = lldbutil.run_to_source_breakpoint(
+ self, "// Set breakpoint1 here", lldb.SBFileSpec(self.main_source)
+ )
+
+ signal_main_thread_value = target.FindFirstGlobalVariable("signal_main_thread")
+ self.assertTrue(signal_main_thread_value.IsValid())
+
+ # Change signal_main_thread global variable to 1 so that worker thread loop can
+ # terminate and move forward to signal main thread
+ signal_main_thread_value.SetValueFromCString("1")
+
+ # Set breakpoint on inner function call
+ inner_breakpoint = target.BreakpointCreateByLocation(
+ lldb.SBFileSpec(self.main_source),
+ line_number("main.cpp", "// Set interrupt breakpoint here"),
+ 0,
+ 0,
+ lldb.SBFileSpecList(),
+ False,
+ )
+
+ # Step over will hit the inner breakpoint and stop
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ self.assertStopReason(self.thread.GetStopReason(), lldb.eStopReasonBreakpoint)
+ thread1 = lldbutil.get_one_thread_stopped_at_breakpoint(
+ process, inner_breakpoint
+ )
+ self.assertTrue(
+ thread1.IsValid(),
+ "We are indeed stopped at inner breakpoint inside deadlock_func",
+ )
+
+ # Step still works
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ self.assertState(process.GetState(), lldb.eStateStopped)
+ self.assertStopReason(self.thread.GetStopReason(), lldb.eStopReasonPlanComplete)
+
+ self.verify_hit_correct_line("// Finish step-over from inner breakpoint")
+
+ @skipIfWindows
+ def test_step_over_deadlock_with_user_async_interrupt(self):
+ """Test step over deadlock function with large timeout then send async interrupt
+ should report correct stop reason
+ """
+
+ self.dbg.HandleCommand(
+ "settings set target.process.thread.single-thread-plan-timeout 2000000"
+ )
+
+ (target, process, self.thread, _) = lldbutil.run_to_source_breakpoint(
+ self, "// Set breakpoint1 here", lldb.SBFileSpec(self.main_source)
+ )
+
+ signal_main_thread_value = target.FindFirstGlobalVariable("signal_main_thread")
+ self.assertTrue(signal_main_thread_value.IsValid())
+
+ # Change signal_main_thread global variable to 1 so that worker thread loop can
+ # terminate and move forward to signal main thread
+ signal_main_thread_value.SetValueFromCString("1")
+
+ self.dbg.SetAsync(True)
+
+ # This stepping should block due to large timeout and should be interrupted by the
+ # async interrupt from the worker thread
+ self.thread.StepOver(lldb.eOnlyThisThread)
+ time.sleep(1)
+
+ listener = self.dbg.GetListener()
+ lldbutil.expect_state_changes(self, listener, process, [lldb.eStateRunning])
+ self.dbg.SetAsync(False)
+
+ process.SendAsyncInterrupt()
+
+ lldbutil.expect_state_changes(self, listener, process, [lldb.eStateStopped])
+ self.assertStopReason(self.thread.GetStopReason(), lldb.eStopReasonSignal)
diff --git a/lldb/test/API/functionalities/single-thread-step/main.cpp b/lldb/test/API/functionalities/single-thread-step/main.cpp
new file mode 100644
index 0000000..fe4fb11
--- /dev/null
+++ b/lldb/test/API/functionalities/single-thread-step/main.cpp
@@ -0,0 +1,68 @@
+#include <condition_variable>
+#include <iostream>
+#include <mutex>
+#include <thread>
+
+std::mutex mtx;
+std::condition_variable cv;
+int ready_thread_id = 0;
+int signal_main_thread = 0;
+
+void worker(int id) {
+ std::cout << "Worker " << id << " executing..." << std::endl;
+
+ // lldb test should change signal_main_thread to true to break the loop.
+ while (!signal_main_thread) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+ }
+
+  // Signal the main thread to continue
+ {
+ std::lock_guard<std::mutex> lock(mtx);
+ ready_thread_id = id; // break worker thread here
+ }
+ cv.notify_one();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+ std::cout << "Worker " << id << " finished." << std::endl;
+}
+
+void deadlock_func(std::unique_lock<std::mutex> &lock) {
+ int i = 10;
+ ++i; // Set interrupt breakpoint here
+ printf("%d", i); // Finish step-over from inner breakpoint
+ auto func = [] { return ready_thread_id == 1; };
+ cv.wait(lock, func);
+}
+
+int simulate_thread() {
+ std::thread t1(worker, 1);
+
+ std::unique_lock<std::mutex> lock(mtx);
+ deadlock_func(lock); // Set breakpoint1 here
+
+ std::thread t2(worker, 2); // Finish step-over from breakpoint1
+
+ cv.wait(lock, [] { return ready_thread_id == 2; });
+
+ t1.join();
+ t2.join();
+
+ std::cout << "Main thread continues..." << std::endl;
+
+ return 0;
+}
+
+int bar() { return 54; }
+
+int foo(const std::string p1, int extra) { return p1.size() + extra; }
+
+int main(int argc, char *argv[]) {
+ std::string ss = "this is a string for testing",
+ ls = "this is a long string for testing";
+ foo(ss.size() % 2 == 0 ? ss : ls, bar()); // Set breakpoint2 here
+
+ simulate_thread(); // Finish step-over from breakpoint2
+
+ return 0;
+}
diff --git a/lldb/test/API/python_api/sbstructureddata/TestStructuredDataAPI.py b/lldb/test/API/python_api/sbstructureddata/TestStructuredDataAPI.py
index b3db3bc..21256d6 100644
--- a/lldb/test/API/python_api/sbstructureddata/TestStructuredDataAPI.py
+++ b/lldb/test/API/python_api/sbstructureddata/TestStructuredDataAPI.py
@@ -110,6 +110,37 @@ class TestStructuredDataAPI(TestBase):
self.assertTrue(my_random_class)
self.assertEqual(my_random_class.payload, MyRandomClass.payload)
+ example = lldb.SBStructuredData()
+ self.assertSuccess(example.SetFromJSON("1"))
+ self.assertEqual(example.GetType(), lldb.eStructuredDataTypeInteger)
+ self.assertEqual(example.GetIntegerValue(), 1)
+
+ self.assertSuccess(example.SetFromJSON("4.19"))
+ self.assertEqual(example.GetType(), lldb.eStructuredDataTypeFloat)
+ self.assertEqual(example.GetFloatValue(), 4.19)
+
+ self.assertSuccess(example.SetFromJSON('"Bonjour, 123!"'))
+ self.assertEqual(example.GetType(), lldb.eStructuredDataTypeString)
+ self.assertEqual(example.GetStringValue(42), "Bonjour, 123!")
+
+ self.assertSuccess(example.SetFromJSON("true"))
+ self.assertEqual(example.GetType(), lldb.eStructuredDataTypeBoolean)
+ self.assertTrue(example.GetBooleanValue())
+
+ self.assertSuccess(example.SetFromJSON("null"))
+ self.assertEqual(example.GetType(), lldb.eStructuredDataTypeNull)
+
+ example_arr = [1, 2.3, "4", {"5": False}]
+ arr_str = json.dumps(example_arr)
+ s.Clear()
+ s.Print(arr_str)
+ self.assertSuccess(example.SetFromJSON(s))
+
+ s.Clear()
+ self.assertSuccess(example.GetAsJSON(s))
+ sb_data = json.loads(s.GetData())
+ self.assertEqual(sb_data, example_arr)
+
def invalid_struct_test(self, example):
invalid_struct = lldb.SBStructuredData()
invalid_struct = example.GetValueForKey("invalid_key")
diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp
index 544e9ff..a8b85f5 100644
--- a/lldb/tools/lldb-dap/JSONUtils.cpp
+++ b/lldb/tools/lldb-dap/JSONUtils.cpp
@@ -923,6 +923,9 @@ llvm::json::Value CreateThreadStopped(lldb::SBThread &thread,
case lldb::eStopReasonVForkDone:
body.try_emplace("reason", "vforkdone");
break;
+ case lldb::eStopReasonInterrupt:
+ body.try_emplace("reason", "async interrupt");
+ break;
case lldb::eStopReasonThreadExiting:
case lldb::eStopReasonInvalid:
case lldb::eStopReasonNone:
diff --git a/lldb/tools/lldb-dap/LLDBUtils.cpp b/lldb/tools/lldb-dap/LLDBUtils.cpp
index a91cc67..2da1078 100644
--- a/lldb/tools/lldb-dap/LLDBUtils.cpp
+++ b/lldb/tools/lldb-dap/LLDBUtils.cpp
@@ -110,6 +110,7 @@ bool ThreadHasStopReason(lldb::SBThread &thread) {
case lldb::eStopReasonFork:
case lldb::eStopReasonVFork:
case lldb::eStopReasonVForkDone:
+ case lldb::eStopReasonInterrupt:
return true;
case lldb::eStopReasonThreadExiting:
case lldb::eStopReasonInvalid:
diff --git a/lldb/unittests/Core/ProgressReportTest.cpp b/lldb/unittests/Core/ProgressReportTest.cpp
index 141244f..0149b1d 100644
--- a/lldb/unittests/Core/ProgressReportTest.cpp
+++ b/lldb/unittests/Core/ProgressReportTest.cpp
@@ -133,6 +133,81 @@ TEST_F(ProgressReportTest, TestReportCreation) {
EXPECT_EQ(data->GetMessage(), "Progress report 1: Starting report 1");
}
+TEST_F(ProgressReportTest, TestReportDestructionWithPartialProgress) {
+ ListenerSP listener_sp = CreateListenerFor(lldb::eBroadcastBitProgress);
+ EventSP event_sp;
+ const ProgressEventData *data;
+
+ // Create a finite progress report and only increment to a non-completed
+ // state before destruction.
+ {
+ Progress progress("Finite progress", "Report 1", 100);
+ progress.Increment(3);
+ }
+
+ // Verify that the progress in the events are:
+ // 1. At construction: 0 out of 100
+ // 2. At increment: 3 out of 100
+ // 3. At destruction: 100 out of 100
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 1");
+ EXPECT_TRUE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), (uint64_t)0);
+ EXPECT_EQ(data->GetTotal(), (uint64_t)100);
+ EXPECT_EQ(data->GetMessage(), "Finite progress: Report 1");
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 1");
+ EXPECT_TRUE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), (uint64_t)3);
+ EXPECT_EQ(data->GetTotal(), (uint64_t)100);
+ EXPECT_EQ(data->GetMessage(), "Finite progress: Report 1");
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 1");
+ EXPECT_TRUE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), (uint64_t)100);
+ EXPECT_EQ(data->GetTotal(), (uint64_t)100);
+ EXPECT_EQ(data->GetMessage(), "Finite progress: Report 1");
+
+ // Create an infinite progress report and increment by some amount.
+ {
+ Progress progress("Infinite progress", "Report 2");
+ progress.Increment(3);
+ }
+
+ // Verify that the progress in the events are:
+ // 1. At construction: 0
+ // 2. At increment: 3
+ // 3. At destruction: Progress::kNonDeterministicTotal
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 2");
+ EXPECT_FALSE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), (uint64_t)0);
+ EXPECT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetMessage(), "Infinite progress: Report 2");
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 2");
+ EXPECT_FALSE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), (uint64_t)3);
+ EXPECT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetMessage(), "Infinite progress: Report 2");
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ EXPECT_EQ(data->GetDetails(), "Report 2");
+ EXPECT_FALSE(data->IsFinite());
+ EXPECT_EQ(data->GetCompleted(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetMessage(), "Infinite progress: Report 2");
+}
+
TEST_F(ProgressReportTest, TestProgressManager) {
ListenerSP listener_sp =
CreateListenerFor(lldb::eBroadcastBitProgressCategory);
diff --git a/lldb/unittests/Symbol/TestClangASTImporter.cpp b/lldb/unittests/Symbol/TestClangASTImporter.cpp
index de59efe..41c7ed7 100644
--- a/lldb/unittests/Symbol/TestClangASTImporter.cpp
+++ b/lldb/unittests/Symbol/TestClangASTImporter.cpp
@@ -188,7 +188,7 @@ TEST_F(TestClangASTImporter, MetadataPropagation) {
ASSERT_NE(nullptr, imported);
// Check that we got the same Metadata.
- ASSERT_NE(nullptr, importer.GetDeclMetadata(imported));
+ ASSERT_NE(std::nullopt, importer.GetDeclMetadata(imported));
EXPECT_EQ(metadata, importer.GetDeclMetadata(imported)->GetUserID());
}
@@ -219,7 +219,7 @@ TEST_F(TestClangASTImporter, MetadataPropagationIndirectImport) {
ASSERT_NE(nullptr, imported);
// Check that we got the same Metadata.
- ASSERT_NE(nullptr, importer.GetDeclMetadata(imported));
+ ASSERT_NE(std::nullopt, importer.GetDeclMetadata(imported));
EXPECT_EQ(metadata, importer.GetDeclMetadata(imported)->GetUserID());
}
@@ -244,7 +244,7 @@ TEST_F(TestClangASTImporter, MetadataPropagationAfterCopying) {
source.ast->SetMetadataAsUserID(source.record_decl, metadata);
// Check that we got the same Metadata.
- ASSERT_NE(nullptr, importer.GetDeclMetadata(imported));
+ ASSERT_NE(std::nullopt, importer.GetDeclMetadata(imported));
EXPECT_EQ(metadata, importer.GetDeclMetadata(imported)->GetUserID());
}
diff --git a/lldb/unittests/Symbol/TestTypeSystemClang.cpp b/lldb/unittests/Symbol/TestTypeSystemClang.cpp
index 30d20b9..2c2dae0 100644
--- a/lldb/unittests/Symbol/TestTypeSystemClang.cpp
+++ b/lldb/unittests/Symbol/TestTypeSystemClang.cpp
@@ -296,7 +296,7 @@ TEST_F(TestTypeSystemClang, TestOwningModule) {
CompilerType record_type = ast.CreateRecordType(
nullptr, OptionalClangModuleID(200), lldb::eAccessPublic, "FooRecord",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
auto *rd = TypeSystemClang::GetAsRecordDecl(record_type);
EXPECT_FALSE(!rd);
EXPECT_EQ(rd->getOwningModuleID(), 200u);
@@ -317,7 +317,7 @@ TEST_F(TestTypeSystemClang, TestIsClangType) {
CompilerType record_type = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(100), lldb::eAccessPublic, "FooRecord",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
// Clang builtin type and record type should pass
EXPECT_TRUE(ClangUtil::IsClangType(bool_type));
EXPECT_TRUE(ClangUtil::IsClangType(record_type));
@@ -330,7 +330,7 @@ TEST_F(TestTypeSystemClang, TestRemoveFastQualifiers) {
CompilerType record_type = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "FooRecord",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
QualType qt;
qt = ClangUtil::GetQualType(record_type);
@@ -403,7 +403,7 @@ TEST_F(TestTypeSystemClang, TestRecordHasFields) {
CompilerType empty_base = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "EmptyBase",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
TypeSystemClang::StartTagDeclarationDefinition(empty_base);
TypeSystemClang::CompleteTagDeclarationDefinition(empty_base);
@@ -415,7 +415,7 @@ TEST_F(TestTypeSystemClang, TestRecordHasFields) {
CompilerType non_empty_base = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "NonEmptyBase",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
TypeSystemClang::StartTagDeclarationDefinition(non_empty_base);
FieldDecl *non_empty_base_field_decl = m_ast->AddFieldToRecordType(
non_empty_base, "MyField", int_type, eAccessPublic, 0);
@@ -432,7 +432,7 @@ TEST_F(TestTypeSystemClang, TestRecordHasFields) {
CompilerType empty_derived = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "EmptyDerived",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
TypeSystemClang::StartTagDeclarationDefinition(empty_derived);
std::unique_ptr<clang::CXXBaseSpecifier> non_empty_base_spec =
m_ast->CreateBaseClassSpecifier(non_empty_base.GetOpaqueQualType(),
@@ -455,7 +455,7 @@ TEST_F(TestTypeSystemClang, TestRecordHasFields) {
CompilerType empty_derived2 = m_ast->CreateRecordType(
nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "EmptyDerived2",
llvm::to_underlying(clang::TagTypeKind::Struct),
- lldb::eLanguageTypeC_plus_plus, nullptr);
+ lldb::eLanguageTypeC_plus_plus, std::nullopt);
TypeSystemClang::StartTagDeclarationDefinition(empty_derived2);
std::unique_ptr<CXXBaseSpecifier> non_empty_vbase_spec =
m_ast->CreateBaseClassSpecifier(non_empty_base.GetOpaqueQualType(),
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 699de1c..51f99cb 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -1197,7 +1197,7 @@ if( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
endif( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
# Make sure we don't get -rdynamic in every binary. For those that need it,
-# use export_executable_symbols(target).
+# use EXPORT_SYMBOLS argument.
set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
include(AddLLVM)
@@ -1238,7 +1238,7 @@ if( LLVM_INCLUDE_UTILS )
if( LLVM_INCLUDE_TESTS )
set(LLVM_SUBPROJECT_TITLE "Third-Party/Google Test")
add_subdirectory(${LLVM_THIRD_PARTY_DIR}/unittest ${CMAKE_CURRENT_BINARY_DIR}/third-party/unittest)
- set(LLVM_SUBPROJECT_TITLE)
+ set(LLVM_SUBPROJECT_TITLE)
endif()
else()
if ( LLVM_INCLUDE_TESTS )
diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake
index bb4e996..257dc22 100644
--- a/llvm/cmake/modules/AddLLVM.cmake
+++ b/llvm/cmake/modules/AddLLVM.cmake
@@ -1010,7 +1010,7 @@ endmacro()
macro(add_llvm_executable name)
cmake_parse_arguments(ARG
- "DISABLE_LLVM_LINK_LLVM_DYLIB;IGNORE_EXTERNALIZE_DEBUGINFO;NO_INSTALL_RPATH;SUPPORT_PLUGINS"
+ "DISABLE_LLVM_LINK_LLVM_DYLIB;IGNORE_EXTERNALIZE_DEBUGINFO;NO_INSTALL_RPATH;SUPPORT_PLUGINS;EXPORT_SYMBOLS;EXPORT_SYMBOLS_FOR_PLUGINS"
"ENTITLEMENTS;BUNDLE_PATH"
""
${ARGN})
@@ -1070,7 +1070,8 @@ macro(add_llvm_executable name)
endif(LLVM_EXPORTED_SYMBOL_FILE)
if (DEFINED LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES AND
- NOT LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES)
+ NOT LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES AND
+ NOT ARG_EXPORT_SYMBOLS AND NOT ARG_EXPORT_SYMBOLS_FOR_PLUGINS)
if(LLVM_LINKER_SUPPORTS_NO_EXPORTED_SYMBOLS)
set_property(TARGET ${name} APPEND_STRING PROPERTY
LINK_FLAGS " -Wl,-no_exported_symbols")
@@ -1080,6 +1081,12 @@ macro(add_llvm_executable name)
endif()
endif()
+ if (ARG_EXPORT_SYMBOLS)
+ export_executable_symbols(${name})
+ elseif(ARG_EXPORT_SYMBOLS_FOR_PLUGINS)
+ export_executable_symbols_for_plugins(${name})
+ endif()
+
if (LLVM_LINK_LLVM_DYLIB AND NOT ARG_DISABLE_LLVM_LINK_LLVM_DYLIB)
set(USE_SHARED USE_SHARED)
endif()
@@ -1464,7 +1471,7 @@ macro(add_llvm_example name)
if( NOT LLVM_BUILD_EXAMPLES )
set(EXCLUDE_FROM_ALL ON)
endif()
- add_llvm_executable(${name} ${ARGN})
+ add_llvm_executable(${name} EXPORT_SYMBOLS ${ARGN})
if( LLVM_BUILD_EXAMPLES )
install(TARGETS ${name} RUNTIME DESTINATION "${LLVM_EXAMPLES_INSTALL_DIR}")
endif()
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index 5654961..b2839b4 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -251,6 +251,41 @@ Overview:
The '``@llvm.nvvm.barrier0()``' intrinsic emits a PTX ``bar.sync 0``
instruction, equivalent to the ``__syncthreads()`` call in CUDA.
+Membar/Fences
+-------------
+
+
+'``llvm.nvvm.fence.proxy.tensormap_generic.*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.cta()
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.cluster()
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.gpu()
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.sys()
+
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cta(ptr %addr, i32 %size)
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cluster(ptr %addr, i32 %size)
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.gpu(ptr %addr, i32 %size)
+ declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.sys(ptr %addr, i32 %size)
+
+Overview:
+"""""""""
+
+The ``@llvm.nvvm.fence.proxy.tensormap_generic.*`` is a uni-directional fence used to establish ordering between a prior memory access performed via the generic `proxy <https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#proxies>`_ and a subsequent memory access performed via the tensormap proxy. ``nvvm.fence.proxy.tensormap_generic.release`` can form a release sequence that synchronizes with an acquire sequence that contains the ``nvvm.fence.proxy.tensormap_generic.acquire`` proxy fence. The following table describes the mapping between LLVM Intrinsic and the PTX instruction:
+
+ ====================================================== =========================================================
+ NVVM Intrinsic PTX Instruction
+ ====================================================== =========================================================
+ ``@llvm.nvvm.fence.proxy.tensormap_generic.release.*`` ``fence.proxy.tensormap::generic.release.*``
+ ``@llvm.nvvm.fence.proxy.tensormap_generic.acquire.*`` ``fence.proxy.tensormap::generic.acquire.* [addr], size``
+ ====================================================== =========================================================
+
+The address operand ``addr`` and the operand ``size`` together specify the memory range ``[addr, addr+size)`` on which the ordering guarantees on the memory accesses across the proxies are to be provided. The only supported value for the ``size`` operand is ``128`` and must be an immediate. Generic Addressing is used unconditionally, and the address specified by the operand ``addr`` must fall within the ``.global`` state space. Otherwise, the behavior is undefined. For more information, see `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#parallel-synchronization-and-communication-instructions-membar>`_.
Other Intrinsics
----------------
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index a95cb53..1ed860d 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -107,6 +107,7 @@ Changes to the RISC-V Backend
the required alignment space with a sequence of `0x0` bytes (the requested
fill value) rather than NOPs.
* Added Syntacore SCR4 CPUs: ``-mcpu=syntacore-scr4-rv32/64``
+* ``-mcpu=sifive-p470`` was added.
Changes to the WebAssembly Backend
----------------------------------
diff --git a/llvm/examples/ExceptionDemo/CMakeLists.txt b/llvm/examples/ExceptionDemo/CMakeLists.txt
index 793cf29..6c125fe 100644
--- a/llvm/examples/ExceptionDemo/CMakeLists.txt
+++ b/llvm/examples/ExceptionDemo/CMakeLists.txt
@@ -16,6 +16,6 @@ endif()
add_llvm_example(ExceptionDemo
ExceptionDemo.cpp
- )
-export_executable_symbols(ExceptionDemo)
+ EXPORT_SYMBOLS
+ )
diff --git a/llvm/examples/HowToUseLLJIT/CMakeLists.txt b/llvm/examples/HowToUseLLJIT/CMakeLists.txt
index 3ca99e5..ca5e190 100644
--- a/llvm/examples/HowToUseLLJIT/CMakeLists.txt
+++ b/llvm/examples/HowToUseLLJIT/CMakeLists.txt
@@ -7,6 +7,6 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(HowToUseLLJIT
HowToUseLLJIT.cpp
- )
-export_executable_symbols(HowToUseLLJIT)
+ EXPORT_SYMBOLS
+ )
diff --git a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter1/CMakeLists.txt b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter1/CMakeLists.txt
index 72c9668..d4b7c3f 100644
--- a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter1/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter1/CMakeLists.txt
@@ -14,5 +14,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(BuildingAJIT-Ch1
toy.cpp
)
-
-export_executable_symbols(BuildingAJIT-Ch1)
diff --git a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter2/CMakeLists.txt b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter2/CMakeLists.txt
index ba6abd7..9fd9e88 100644
--- a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter2/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter2/CMakeLists.txt
@@ -14,5 +14,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(BuildingAJIT-Ch2
toy.cpp
)
-
-export_executable_symbols(BuildingAJIT-Ch2)
diff --git a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter3/CMakeLists.txt b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter3/CMakeLists.txt
index 51800a6..cf82552 100644
--- a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter3/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter3/CMakeLists.txt
@@ -15,5 +15,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(BuildingAJIT-Ch3
toy.cpp
)
-
-export_executable_symbols(BuildingAJIT-Ch3)
diff --git a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter4/CMakeLists.txt b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter4/CMakeLists.txt
index 7cd40a1..241192f 100644
--- a/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter4/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/BuildingAJIT/Chapter4/CMakeLists.txt
@@ -15,5 +15,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(BuildingAJIT-Ch4
toy.cpp
)
-
-export_executable_symbols(BuildingAJIT-Ch4)
diff --git a/llvm/examples/Kaleidoscope/CMakeLists.txt b/llvm/examples/Kaleidoscope/CMakeLists.txt
index 6ad3b61..3cc6733 100644
--- a/llvm/examples/Kaleidoscope/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/CMakeLists.txt
@@ -3,7 +3,7 @@ set_target_properties(Kaleidoscope PROPERTIES FOLDER "LLVM/Examples")
macro(add_kaleidoscope_chapter name)
add_dependencies(Kaleidoscope ${name})
- add_llvm_example(${name} ${ARGN})
+ add_llvm_example(${name} EXPORT_SYMBOLS ${ARGN})
endmacro(add_kaleidoscope_chapter name)
add_subdirectory(BuildingAJIT)
diff --git a/llvm/examples/Kaleidoscope/Chapter4/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter4/CMakeLists.txt
index 5281941..dcb82ff 100644
--- a/llvm/examples/Kaleidoscope/Chapter4/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter4/CMakeLists.txt
@@ -15,5 +15,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch4
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch4)
diff --git a/llvm/examples/Kaleidoscope/Chapter5/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter5/CMakeLists.txt
index 35fb1b7..9639f44 100644
--- a/llvm/examples/Kaleidoscope/Chapter5/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter5/CMakeLists.txt
@@ -15,5 +15,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch5
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch5)
diff --git a/llvm/examples/Kaleidoscope/Chapter6/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter6/CMakeLists.txt
index 627d682..0baf3f4 100644
--- a/llvm/examples/Kaleidoscope/Chapter6/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter6/CMakeLists.txt
@@ -15,5 +15,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch6
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch6)
diff --git a/llvm/examples/Kaleidoscope/Chapter7/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter7/CMakeLists.txt
index f4d8bd9..2167b4c 100644
--- a/llvm/examples/Kaleidoscope/Chapter7/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter7/CMakeLists.txt
@@ -16,5 +16,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch7
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch7)
diff --git a/llvm/examples/Kaleidoscope/Chapter8/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter8/CMakeLists.txt
index 1bb1cd25..29c02c9 100644
--- a/llvm/examples/Kaleidoscope/Chapter8/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter8/CMakeLists.txt
@@ -5,5 +5,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch8
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch8)
diff --git a/llvm/examples/Kaleidoscope/Chapter9/CMakeLists.txt b/llvm/examples/Kaleidoscope/Chapter9/CMakeLists.txt
index a5d1a45..d21ba34 100644
--- a/llvm/examples/Kaleidoscope/Chapter9/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/Chapter9/CMakeLists.txt
@@ -11,5 +11,3 @@ set(LLVM_LINK_COMPONENTS
add_kaleidoscope_chapter(Kaleidoscope-Ch9
toy.cpp
)
-
-export_executable_symbols(Kaleidoscope-Ch9)
diff --git a/llvm/examples/OrcV2Examples/LLJITDumpObjects/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITDumpObjects/CMakeLists.txt
index 42b9d1c..3d83ee6 100644
--- a/llvm/examples/OrcV2Examples/LLJITDumpObjects/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITDumpObjects/CMakeLists.txt
@@ -11,5 +11,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITDumpObjects
LLJITDumpObjects.cpp
)
-
-export_executable_symbols(LLJITDumpObjects)
diff --git a/llvm/examples/OrcV2Examples/LLJITRemovableCode/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITRemovableCode/CMakeLists.txt
index 2c0036f..3ab58c1 100644
--- a/llvm/examples/OrcV2Examples/LLJITRemovableCode/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITRemovableCode/CMakeLists.txt
@@ -12,5 +12,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITRemovableCode
LLJITRemovableCode.cpp
)
-
-export_executable_symbols(LLJITRemovableCode)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithCustomObjectLinkingLayer/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithCustomObjectLinkingLayer/CMakeLists.txt
index 85e11ec..6034fc6 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithCustomObjectLinkingLayer/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithCustomObjectLinkingLayer/CMakeLists.txt
@@ -10,5 +10,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithCustomObjectLinkingLayer
LLJITWithCustomObjectLinkingLayer.cpp
)
-
-export_executable_symbols(LLJITWithCustomObjectLinkingLayer)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithExecutorProcessControl/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithExecutorProcessControl/CMakeLists.txt
index 65a5b1b..47f50ca 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithExecutorProcessControl/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithExecutorProcessControl/CMakeLists.txt
@@ -10,5 +10,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithExecutorProcessControl
LLJITWithExecutorProcessControl.cpp
)
-
-export_executable_symbols(LLJITWithExecutorProcessControl)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithGDBRegistrationListener/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithGDBRegistrationListener/CMakeLists.txt
index 12fe634..6186420 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithGDBRegistrationListener/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithGDBRegistrationListener/CMakeLists.txt
@@ -12,7 +12,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithGDBRegistrationListener
LLJITWithGDBRegistrationListener.cpp
)
-
-# We want JIT'd code to be able to link against process symbols like printf
-# for this example, so make sure they're exported.
-export_executable_symbols(LLJITWithGDBRegistrationListener)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithInitializers/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithInitializers/CMakeLists.txt
index ed466f4..30264756 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithInitializers/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithInitializers/CMakeLists.txt
@@ -11,5 +11,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithInitializers
LLJITWithInitializers.cpp
)
-
-export_executable_symbols(LLJITWithInitializers)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithLazyReexports/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithLazyReexports/CMakeLists.txt
index 2ed22e1..cdff74b 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithLazyReexports/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithLazyReexports/CMakeLists.txt
@@ -10,5 +10,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithLazyReexports
LLJITWithLazyReexports.cpp
)
-
-export_executable_symbols(LLJITWithLazyReexports)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithObjectCache/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithObjectCache/CMakeLists.txt
index a4eaaad..c5f8fd6 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithObjectCache/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithObjectCache/CMakeLists.txt
@@ -10,5 +10,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithObjectCache
LLJITWithObjectCache.cpp
)
-
-export_executable_symbols(LLJITWithObjectCache)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithObjectLinkingLayerPlugin/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithObjectLinkingLayerPlugin/CMakeLists.txt
index 6177d45..5481462 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithObjectLinkingLayerPlugin/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithObjectLinkingLayerPlugin/CMakeLists.txt
@@ -10,5 +10,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithObjectLinkingLayerPlugin
LLJITWithObjectLinkingLayerPlugin.cpp
)
-
-export_executable_symbols(LLJITWithObjectLinkingLayerPlugin)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithOptimizingIRTransform/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithOptimizingIRTransform/CMakeLists.txt
index b40e309..a9cd91a 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithOptimizingIRTransform/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithOptimizingIRTransform/CMakeLists.txt
@@ -12,5 +12,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithOptimizingIRTransform
LLJITWithOptimizingIRTransform.cpp
)
-
-export_executable_symbols(LLJITWithOptimizingIRTransform)
diff --git a/llvm/examples/OrcV2Examples/LLJITWithRemoteDebugging/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithRemoteDebugging/CMakeLists.txt
index 51b3925..6052622 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithRemoteDebugging/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithRemoteDebugging/CMakeLists.txt
@@ -20,6 +20,4 @@ if (LLVM_INCLUDE_UTILS)
DEPENDS
llvm-jitlink-executor
)
-
- export_executable_symbols(LLJITWithRemoteDebugging)
endif()
diff --git a/llvm/examples/OrcV2Examples/LLJITWithThinLTOSummaries/CMakeLists.txt b/llvm/examples/OrcV2Examples/LLJITWithThinLTOSummaries/CMakeLists.txt
index 148b1aa..5ce2fb4 100644
--- a/llvm/examples/OrcV2Examples/LLJITWithThinLTOSummaries/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/LLJITWithThinLTOSummaries/CMakeLists.txt
@@ -12,5 +12,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(LLJITWithThinLTOSummaries
LLJITWithThinLTOSummaries.cpp
)
-
-export_executable_symbols(LLJITWithThinLTOSummaries)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsAddObjectFile/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsAddObjectFile/CMakeLists.txt
index 376ee1d..cc50112 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsAddObjectFile/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsAddObjectFile/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsAddObjectFile
OrcV2CBindingsAddObjectFile.c
)
-
-export_executable_symbols(OrcV2CBindingsAddObjectFile)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsBasicUsage/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsBasicUsage/CMakeLists.txt
index 3e0ac39..0f18d6c 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsBasicUsage/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsBasicUsage/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsBasicUsage
OrcV2CBindingsBasicUsage.c
)
-
-export_executable_symbols(OrcV2CBindingsBasicUsage)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsDumpObjects/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsDumpObjects/CMakeLists.txt
index ad927d1..8e2c97d 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsDumpObjects/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsDumpObjects/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsDumpObjects
OrcV2CBindingsDumpObjects.c
)
-
-export_executable_symbols(OrcV2CBindingsDumpObjects)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsIRTransforms/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsIRTransforms/CMakeLists.txt
index c90534c..af1b43e 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsIRTransforms/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsIRTransforms/CMakeLists.txt
@@ -14,5 +14,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsIRTransforms
OrcV2CBindingsIRTransforms.c
)
-
-export_executable_symbols(OrcV2CBindingsIRTransforms)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsLazy/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsLazy/CMakeLists.txt
index 74238cb..52eb2d4 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsLazy/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsLazy/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsLazy
OrcV2CBindingsLazy.c
)
-
-export_executable_symbols(OrcV2CBindingsLazy)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsRemovableCode/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsRemovableCode/CMakeLists.txt
index 65299ff..5b73755 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsRemovableCode/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsRemovableCode/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsRemovableCode
OrcV2CBindingsRemovableCode.c
)
-
-export_executable_symbols(OrcV2CBindingsRemovableCode)
diff --git a/llvm/examples/OrcV2Examples/OrcV2CBindingsVeryLazy/CMakeLists.txt b/llvm/examples/OrcV2Examples/OrcV2CBindingsVeryLazy/CMakeLists.txt
index 5d3e730..0bc9610 100644
--- a/llvm/examples/OrcV2Examples/OrcV2CBindingsVeryLazy/CMakeLists.txt
+++ b/llvm/examples/OrcV2Examples/OrcV2CBindingsVeryLazy/CMakeLists.txt
@@ -13,5 +13,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_example(OrcV2CBindingsVeryLazy
OrcV2CBindingsVeryLazy.c
)
-
-export_executable_symbols(OrcV2CBindingsVeryLazy)
diff --git a/llvm/include/llvm/ADT/GraphTraits.h b/llvm/include/llvm/ADT/GraphTraits.h
index 3a77735..0764ecb 100644
--- a/llvm/include/llvm/ADT/GraphTraits.h
+++ b/llvm/include/llvm/ADT/GraphTraits.h
@@ -19,6 +19,7 @@
#ifndef LLVM_ADT_GRAPHTRAITS_H
#define LLVM_ADT_GRAPHTRAITS_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
namespace llvm {
@@ -71,6 +72,20 @@ struct GraphTraits {
// static unsigned size (GraphType *G)
// Return total number of nodes in the graph
+ // Optionally implement the following:
+ // static unsigned getNumber(NodeRef)
+ // Return a unique number for a node. Numbers are ideally dense, as these are
+ // used to store nodes in a vector.
+ // static unsigned getMaxNumber(GraphType *G)
+ // Return the maximum number that getNumber() will return, or 0 if this is
+ // unknown. Intended for reserving large enough buffers.
+ // static unsigned getNumberEpoch(GraphType *G)
+ // Return the "epoch" of the node numbers. Should return a different
+ // number after renumbering, so users can assert that the epoch didn't
+ // change => numbers are still valid. If renumberings are not tracked, it
+ // is always valid to return a constant value. This is solely to ease
+ // debugging by having a way to detect use of outdated numbers.
+
// If anyone tries to use this class without having an appropriate
// specialization, make an error. If you get this error, it's because you
// need to include the appropriate specialization of GraphTraits<> for your
@@ -80,6 +95,16 @@ struct GraphTraits {
using NodeRef = typename GraphType::UnknownGraphTypeError;
};
+namespace detail {
+template <typename T>
+using has_number_t = decltype(GraphTraits<T>::getNumber(std::declval<T>()));
+} // namespace detail
+
+/// Indicate whether a GraphTraits<NodeT>::getNumber() is supported.
+template <typename NodeT>
+constexpr bool GraphHasNodeNumbers =
+ is_detected<detail::has_number_t, NodeT>::value;
+
// Inverse - This class is used as a little marker class to tell the graph
// iterator to iterate over the graph in a graph defined "Inverse" ordering.
// Not all graphs define an inverse ordering, and if they do, it depends on
diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h
index 8f988d0..3f5ea3a 100644
--- a/llvm/include/llvm/ADT/STLExtras.h
+++ b/llvm/include/llvm/ADT/STLExtras.h
@@ -2366,7 +2366,7 @@ public:
detail::index_iterator end() const { return {End}; }
};
-/// Given two or more input ranges, returns a new range whose values are are
+/// Given two or more input ranges, returns a new range whose values are
/// tuples (A, B, C, ...), such that A is the 0-based index of the item in the
/// sequence, and B, C, ..., are the values from the original input ranges. All
/// input ranges are required to have equal lengths. Note that the returned
diff --git a/llvm/include/llvm/Analysis/CtxProfAnalysis.h b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
new file mode 100644
index 0000000..d77c81d
--- /dev/null
+++ b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
@@ -0,0 +1,70 @@
+//===- CtxProfAnalysis.h - maintain contextual profile info -*- C++ ---*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_ANALYSIS_CTXPROFANALYSIS_H
+#define LLVM_ANALYSIS_CTXPROFANALYSIS_H
+
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/ProfileData/PGOCtxProfReader.h"
+#include <map>
+
+namespace llvm {
+
+class CtxProfAnalysis;
+
+/// The instrumented contextual profile, produced by the CtxProfAnalysis.
+class PGOContextualProfile {
+ std::optional<PGOCtxProfContext::CallTargetMapTy> Profiles;
+
+public:
+ explicit PGOContextualProfile(PGOCtxProfContext::CallTargetMapTy &&Profiles)
+ : Profiles(std::move(Profiles)) {}
+ PGOContextualProfile() = default;
+ PGOContextualProfile(const PGOContextualProfile &) = delete;
+ PGOContextualProfile(PGOContextualProfile &&) = default;
+
+ operator bool() const { return Profiles.has_value(); }
+
+ const PGOCtxProfContext::CallTargetMapTy &profiles() const {
+ return *Profiles;
+ }
+
+ bool invalidate(Module &, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &) {
+ // Check whether the analysis has been explicitly invalidated. Otherwise,
+ // it's stateless and remains preserved.
+ auto PAC = PA.getChecker<CtxProfAnalysis>();
+ return !PAC.preservedWhenStateless();
+ }
+};
+
+class CtxProfAnalysis : public AnalysisInfoMixin<CtxProfAnalysis> {
+ StringRef Profile;
+
+public:
+ static AnalysisKey Key;
+ explicit CtxProfAnalysis(StringRef Profile) : Profile(Profile) {};
+
+ using Result = PGOContextualProfile;
+
+ PGOContextualProfile run(Module &M, ModuleAnalysisManager &MAM);
+};
+
+class CtxProfAnalysisPrinterPass
+ : public PassInfoMixin<CtxProfAnalysisPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit CtxProfAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+ static bool isRequired() { return true; }
+};
+} // namespace llvm
+#endif // LLVM_ANALYSIS_CTXPROFANALYSIS_H
diff --git a/llvm/include/llvm/Analysis/RegionInfoImpl.h b/llvm/include/llvm/Analysis/RegionInfoImpl.h
index c5e8821..ebfb060 100644
--- a/llvm/include/llvm/Analysis/RegionInfoImpl.h
+++ b/llvm/include/llvm/Analysis/RegionInfoImpl.h
@@ -814,7 +814,8 @@ RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
// Get the single exit of BB.
if (R && R->getEntry() == BB)
Exit = R->getExit();
- else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB))
+ else if (std::next(BlockTraits::child_begin(BB)) ==
+ BlockTraits::child_end(BB))
Exit = *BlockTraits::child_begin(BB);
else // No single exit exists.
return Exit;
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
index 754f09c..23c910b 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -356,6 +356,32 @@ TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Ptr, Bool)
+/// The following are variants of operator new which return the actual size
+/// reserved by the allocator proposed in P0901R5 (Size feedback in operator new).
+/// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0901r5.html
+/// They are implemented by tcmalloc, see source at
+/// https://github.com/google/tcmalloc/blob/master/tcmalloc/malloc_extension.h
+
+/// __sized_ptr_t __size_returning_new(size_t size)
+TLI_DEFINE_ENUM_INTERNAL(size_returning_new)
+TLI_DEFINE_STRING_INTERNAL("__size_returning_new")
+TLI_DEFINE_SIG_INTERNAL(Struct, Long)
+
+/// __sized_ptr_t __size_returning_new_hot_cold(size_t, __hot_cold_t)
+TLI_DEFINE_ENUM_INTERNAL(size_returning_new_hot_cold)
+TLI_DEFINE_STRING_INTERNAL("__size_returning_new_hot_cold")
+TLI_DEFINE_SIG_INTERNAL(Struct, Long, Bool)
+
+/// __sized_ptr_t __size_returning_new_aligned(size_t, std::align_val_t)
+TLI_DEFINE_ENUM_INTERNAL(size_returning_new_aligned)
+TLI_DEFINE_STRING_INTERNAL("__size_returning_new_aligned")
+TLI_DEFINE_SIG_INTERNAL(Struct, Long, Long)
+
+/// __sized_ptr_t __size_returning_new_aligned(size_t, std::align_val_t, __hot_cold_t)
+TLI_DEFINE_ENUM_INTERNAL(size_returning_new_aligned_hot_cold)
+TLI_DEFINE_STRING_INTERNAL("__size_returning_new_aligned_hot_cold")
+TLI_DEFINE_SIG_INTERNAL(Struct, Long, Long, Bool)
+
/// double __acos_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(acos_finite)
TLI_DEFINE_STRING_INTERNAL("__acos_finite")
diff --git a/llvm/include/llvm/BinaryFormat/COFF.h b/llvm/include/llvm/BinaryFormat/COFF.h
index c864286..3fc543f 100644
--- a/llvm/include/llvm/BinaryFormat/COFF.h
+++ b/llvm/include/llvm/BinaryFormat/COFF.h
@@ -417,6 +417,21 @@ enum RelocationTypesARM64 : unsigned {
IMAGE_REL_ARM64_REL32 = 0x0011,
};
+enum DynamicRelocationType : unsigned {
+ IMAGE_DYNAMIC_RELOCATION_GUARD_RF_PROLOGUE = 1,
+ IMAGE_DYNAMIC_RELOCATION_GUARD_RF_EPILOGUE = 2,
+ IMAGE_DYNAMIC_RELOCATION_GUARD_IMPORT_CONTROL_TRANSFER = 3,
+ IMAGE_DYNAMIC_RELOCATION_GUARD_INDIR_CONTROL_TRANSFER = 4,
+ IMAGE_DYNAMIC_RELOCATION_GUARD_SWITCHTABLE_BRANCH = 5,
+ IMAGE_DYNAMIC_RELOCATION_ARM64X = 6,
+};
+
+enum Arm64XFixupType : uint8_t {
+ IMAGE_DVRT_ARM64X_FIXUP_TYPE_ZEROFILL = 0,
+ IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE = 1,
+ IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA = 2,
+};
+
enum COMDATType : uint8_t {
IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
IMAGE_COMDAT_SELECT_ANY,
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index fb39bb4..c21bb41 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -1799,8 +1799,10 @@ enum : unsigned {
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR = 4,
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR = 5,
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI = 6,
+ AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC = 7,
+ AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT = 8,
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST =
- AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI,
+ AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT,
};
// x86 processor feature bits.
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 17f99e7..93086d4 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -2151,45 +2151,17 @@ public:
return Cost;
}
case Intrinsic::sadd_sat:
- case Intrinsic::ssub_sat: {
- Type *CondTy = RetTy->getWithNewBitWidth(1);
-
- Type *OpTy = StructType::create({RetTy, CondTy});
- Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
- ? Intrinsic::sadd_with_overflow
- : Intrinsic::ssub_with_overflow;
- CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
-
- // SatMax -> Overflow && SumDiff < 0
- // SatMin -> Overflow && SumDiff >= 0
- InstructionCost Cost = 0;
- IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
- nullptr, ScalarizationCostPassed);
- Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
- Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
- Pred, CostKind);
- Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
- CondTy, Pred, CostKind);
- return Cost;
- }
+ ISD = ISD::SADDSAT;
+ break;
+ case Intrinsic::ssub_sat:
+ ISD = ISD::SSUBSAT;
+ break;
case Intrinsic::uadd_sat:
- case Intrinsic::usub_sat: {
- Type *CondTy = RetTy->getWithNewBitWidth(1);
-
- Type *OpTy = StructType::create({RetTy, CondTy});
- Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
- ? Intrinsic::uadd_with_overflow
- : Intrinsic::usub_with_overflow;
-
- InstructionCost Cost = 0;
- IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
- nullptr, ScalarizationCostPassed);
- Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- return Cost;
- }
+ ISD = ISD::UADDSAT;
+ break;
+ case Intrinsic::usub_sat:
+ ISD = ISD::USUBSAT;
+ break;
case Intrinsic::smul_fix:
case Intrinsic::umul_fix: {
unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
@@ -2355,14 +2327,17 @@ public:
return (LT.first * 2);
}
- // If we can't lower fmuladd into an FMA estimate the cost as a floating
- // point mul followed by an add.
- if (IID == Intrinsic::fmuladd)
+ switch (IID) {
+ case Intrinsic::fmuladd: {
+ // If we can't lower fmuladd into an FMA estimate the cost as a floating
+ // point mul followed by an add.
+
return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
CostKind) +
thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
CostKind);
- if (IID == Intrinsic::experimental_constrained_fmuladd) {
+ }
+ case Intrinsic::experimental_constrained_fmuladd: {
IntrinsicCostAttributes FMulAttrs(
Intrinsic::experimental_constrained_fmul, RetTy, Tys);
IntrinsicCostAttributes FAddAttrs(
@@ -2370,6 +2345,50 @@ public:
return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
}
+ case Intrinsic::sadd_sat:
+ case Intrinsic::ssub_sat: {
+ // Assume a default expansion.
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
+
+ Type *OpTy = StructType::create({RetTy, CondTy});
+ Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
+ ? Intrinsic::sadd_with_overflow
+ : Intrinsic::ssub_with_overflow;
+ CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
+
+ // SatMax -> Overflow && SumDiff < 0
+ // SatMin -> Overflow && SumDiff >= 0
+ InstructionCost Cost = 0;
+ IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
+ nullptr, ScalarizationCostPassed);
+ Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
+ Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
+ Pred, CostKind);
+ Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
+ CondTy, Pred, CostKind);
+ return Cost;
+ }
+ case Intrinsic::uadd_sat:
+ case Intrinsic::usub_sat: {
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
+
+ Type *OpTy = StructType::create({RetTy, CondTy});
+ Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
+ ? Intrinsic::uadd_with_overflow
+ : Intrinsic::usub_with_overflow;
+
+ InstructionCost Cost = 0;
+ IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
+ nullptr, ScalarizationCostPassed);
+ Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ return Cost;
+ }
+ default:
+ break;
+ }
// Else, assume that we need to scalarize this intrinsic. For math builtins
// this will emit a costly libcall, adding call overhead and spills. Make it
diff --git a/llvm/include/llvm/CodeGen/ExpandVectorPredication.h b/llvm/include/llvm/CodeGen/ExpandVectorPredication.h
index b69adb9..c42c644 100644
--- a/llvm/include/llvm/CodeGen/ExpandVectorPredication.h
+++ b/llvm/include/llvm/CodeGen/ExpandVectorPredication.h
@@ -13,11 +13,14 @@
namespace llvm {
-class ExpandVectorPredicationPass
- : public PassInfoMixin<ExpandVectorPredicationPass> {
-public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
+class TargetTransformInfo;
+class VPIntrinsic;
+
+/// Expand a vector predication intrinsic. Returns true if the intrinsic was
+/// removed/replaced.
+bool expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
+ const TargetTransformInfo &TTI);
+
} // end namespace llvm
#endif // LLVM_CODEGEN_EXPANDVECTORPREDICATION_H
diff --git a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 0e08b9b..c17cacb 100644
--- a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -73,8 +73,8 @@ public:
/// allocated to hold a pointer to the hidden sret parameter.
Register DemoteRegister;
- /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
- DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+ /// A mapping from LLVM basic block number to their machine block.
+ SmallVector<MachineBasicBlock *> MBBMap;
/// ValueMap - Since we emit code for the function a basic block at a time,
/// we must remember which virtual registers hold the values for
@@ -172,9 +172,9 @@ public:
/// for a value.
DenseMap<const Value *, ISD::NodeType> PreferredExtendType;
- /// VisitedBBs - The set of basic blocks visited thus far by instruction
- /// selection.
- SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
+ /// The set of basic blocks visited thus far by instruction selection. Indexed
+ /// by basic block number.
+ SmallVector<bool> VisitedBBs;
/// PHINodesToUpdate - A list of phi instructions whose operand list will
/// be updated after processing the current basic block.
@@ -213,7 +213,8 @@ public:
}
MachineBasicBlock *getMBB(const BasicBlock *BB) const {
- return MBBMap.lookup(BB);
+ assert(BB->getNumber() < MBBMap.size() && "uninitialized MBBMap?");
+ return MBBMap[BB->getNumber()];
}
Register CreateReg(MVT VT, bool isDivergent = false);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index deae2c5..2796ea4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -146,11 +146,6 @@ private:
/// virtual registers and offsets.
ValueToVRegInfo VMap;
- // N.b. it's not completely obvious that this will be sufficient for every
- // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
- // lives.
- DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
-
// One BasicBlock can be translated to multiple MachineBasicBlocks. For such
// BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
// a mapping between the edges arriving at the BasicBlock to the corresponding
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index b8153fd..797e29d 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -157,8 +157,8 @@ private:
Instructions Insts;
/// Keep track of the predecessor / successor basic blocks.
- std::vector<MachineBasicBlock *> Predecessors;
- std::vector<MachineBasicBlock *> Successors;
+ SmallVector<MachineBasicBlock *, 4> Predecessors;
+ SmallVector<MachineBasicBlock *, 2> Successors;
/// Keep track of the probabilities to the successors. This vector has the
/// same order as Successors, or it is empty if we don't use it (disable
@@ -387,18 +387,20 @@ public:
}
// Machine-CFG iterators
- using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
- using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
- using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
- using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+ using pred_iterator = SmallVectorImpl<MachineBasicBlock *>::iterator;
+ using const_pred_iterator =
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator;
+ using succ_iterator = SmallVectorImpl<MachineBasicBlock *>::iterator;
+ using const_succ_iterator =
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator;
using pred_reverse_iterator =
- std::vector<MachineBasicBlock *>::reverse_iterator;
+ SmallVectorImpl<MachineBasicBlock *>::reverse_iterator;
using const_pred_reverse_iterator =
- std::vector<MachineBasicBlock *>::const_reverse_iterator;
+ SmallVectorImpl<MachineBasicBlock *>::const_reverse_iterator;
using succ_reverse_iterator =
- std::vector<MachineBasicBlock *>::reverse_iterator;
+ SmallVectorImpl<MachineBasicBlock *>::reverse_iterator;
using const_succ_reverse_iterator =
- std::vector<MachineBasicBlock *>::const_reverse_iterator;
+ SmallVectorImpl<MachineBasicBlock *>::const_reverse_iterator;
pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); }
@@ -1295,6 +1297,11 @@ template <> struct GraphTraits<MachineBasicBlock *> {
static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+
+ static unsigned getNumber(MachineBasicBlock *BB) {
+ assert(BB->getNumber() >= 0 && "negative block number");
+ return BB->getNumber();
+ }
};
template <> struct GraphTraits<const MachineBasicBlock *> {
@@ -1304,6 +1311,11 @@ template <> struct GraphTraits<const MachineBasicBlock *> {
static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+
+ static unsigned getNumber(const MachineBasicBlock *BB) {
+ assert(BB->getNumber() >= 0 && "negative block number");
+ return BB->getNumber();
+ }
};
// Provide specializations of GraphTraits to be able to treat a
@@ -1322,6 +1334,11 @@ template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+
+ static unsigned getNumber(MachineBasicBlock *BB) {
+ assert(BB->getNumber() >= 0 && "negative block number");
+ return BB->getNumber();
+ }
};
template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
@@ -1334,6 +1351,11 @@ template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+
+ static unsigned getNumber(const MachineBasicBlock *BB) {
+ assert(BB->getNumber() >= 0 && "negative block number");
+ return BB->getNumber();
+ }
};
// These accessors are handy for sharing templated code between IR and MIR.
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index e1d03fe..9845520 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -293,6 +293,10 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
// numbered and this vector keeps track of the mapping from ID's to MBB's.
std::vector<MachineBasicBlock*> MBBNumbering;
+ // MBBNumbering epoch, incremented after renumbering to detect use of old
+ // block numbers.
+ unsigned MBBNumberingEpoch = 0;
+
// Pool-allocate MachineFunction-lifetime and IR objects.
BumpPtrAllocator Allocator;
@@ -856,6 +860,11 @@ public:
/// getNumBlockIDs - Return the number of MBB ID's allocated.
unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
+ /// Return the numbering "epoch" of block numbers, incremented after each
+ /// numbering. Intended for asserting that no renumbering was performed when
+ /// used by, e.g., preserved analyses.
+ unsigned getBlockNumberEpoch() const { return MBBNumberingEpoch; }
+
/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
/// recomputes them. This guarantees that the MBB numbers are sequential,
/// dense, and match the ordering of the blocks within the function. If a
@@ -1404,6 +1413,13 @@ template <> struct GraphTraits<MachineFunction*> :
}
static unsigned size (MachineFunction *F) { return F->size(); }
+
+ static unsigned getMaxNumber(MachineFunction *F) {
+ return F->getNumBlockIDs();
+ }
+ static unsigned getNumberEpoch(MachineFunction *F) {
+ return F->getBlockNumberEpoch();
+ }
};
template <> struct GraphTraits<const MachineFunction*> :
public GraphTraits<const MachineBasicBlock*> {
@@ -1423,6 +1439,13 @@ template <> struct GraphTraits<const MachineFunction*> :
static unsigned size (const MachineFunction *F) {
return F->size();
}
+
+ static unsigned getMaxNumber(const MachineFunction *F) {
+ return F->getNumBlockIDs();
+ }
+ static unsigned getNumberEpoch(const MachineFunction *F) {
+ return F->getBlockNumberEpoch();
+ }
};
// Provide specializations of GraphTraits to be able to treat a function as a
@@ -1435,12 +1458,26 @@ template <> struct GraphTraits<Inverse<MachineFunction*>> :
static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
return &G.Graph->front();
}
+
+ static unsigned getMaxNumber(MachineFunction *F) {
+ return F->getNumBlockIDs();
+ }
+ static unsigned getNumberEpoch(MachineFunction *F) {
+ return F->getBlockNumberEpoch();
+ }
};
template <> struct GraphTraits<Inverse<const MachineFunction*>> :
public GraphTraits<Inverse<const MachineBasicBlock*>> {
static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
return &G.Graph->front();
}
+
+ static unsigned getMaxNumber(const MachineFunction *F) {
+ return F->getNumBlockIDs();
+ }
+ static unsigned getNumberEpoch(const MachineFunction *F) {
+ return F->getBlockNumberEpoch();
+ }
};
void verifyMachineFunction(const std::string &Banner,
diff --git a/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 09d9a0b..2367d8d 100644
--- a/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -801,6 +801,7 @@ public:
/// of an earlier hint it will be overwritten.
void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg) {
assert(VReg.isVirtual());
+ RegAllocHints.grow(Register::index2VirtReg(getNumVirtRegs()));
RegAllocHints[VReg].first = Type;
RegAllocHints[VReg].second.clear();
RegAllocHints[VReg].second.push_back(PrefReg);
@@ -810,6 +811,7 @@ public:
/// vector for VReg.
void addRegAllocationHint(Register VReg, Register PrefReg) {
assert(VReg.isVirtual());
+ RegAllocHints.grow(Register::index2VirtReg(getNumVirtRegs()));
RegAllocHints[VReg].second.push_back(PrefReg);
}
@@ -822,7 +824,8 @@ public:
void clearSimpleHint(Register VReg) {
assert (!RegAllocHints[VReg].first &&
"Expected to clear a non-target hint!");
- RegAllocHints[VReg].second.clear();
+ if (RegAllocHints.inBounds(VReg))
+ RegAllocHints[VReg].second.clear();
}
/// getRegAllocationHint - Return the register allocation hint for the
@@ -830,6 +833,8 @@ public:
/// one with the greatest weight.
std::pair<unsigned, Register> getRegAllocationHint(Register VReg) const {
assert(VReg.isVirtual());
+ if (!RegAllocHints.inBounds(VReg))
+ return {0, Register()};
Register BestHint = (RegAllocHints[VReg.id()].second.size() ?
RegAllocHints[VReg.id()].second[0] : Register());
return {RegAllocHints[VReg.id()].first, BestHint};
@@ -845,10 +850,10 @@ public:
/// getRegAllocationHints - Return a reference to the vector of all
/// register allocation hints for VReg.
- const std::pair<unsigned, SmallVector<Register, 4>> &
+ const std::pair<unsigned, SmallVector<Register, 4>> *
getRegAllocationHints(Register VReg) const {
assert(VReg.isVirtual());
- return RegAllocHints[VReg];
+ return RegAllocHints.inBounds(VReg) ? &RegAllocHints[VReg] : nullptr;
}
/// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index cafb9781..20273d0 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -513,11 +513,6 @@ namespace llvm {
// the corresponding function in a vector library (e.g., SVML, libmvec).
FunctionPass *createReplaceWithVeclibLegacyPass();
- /// This pass expands the vector predication intrinsics into unpredicated
- /// instructions with selects or just the explicit vector length into the
- /// predicate mask.
- FunctionPass *createExpandVectorPredicationPass();
-
// Expands large div/rem instructions.
FunctionPass *createExpandLargeDivRemPass();
diff --git a/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h b/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
index aa6a0e6..955a243 100644
--- a/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
+++ b/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
@@ -22,9 +22,9 @@ class TargetMachine;
struct PreISelIntrinsicLoweringPass
: PassInfoMixin<PreISelIntrinsicLoweringPass> {
- const TargetMachine &TM;
+ const TargetMachine *TM;
- PreISelIntrinsicLoweringPass(const TargetMachine &TM) : TM(TM) {}
+ PreISelIntrinsicLoweringPass(const TargetMachine *TM) : TM(TM) {}
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h
index ad1c541..96ece15 100644
--- a/llvm/include/llvm/CodeGen/SDPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h
@@ -477,10 +477,9 @@ struct TernaryOpc_match {
};
template <typename T0_P, typename T1_P, typename T2_P>
-inline TernaryOpc_match<T0_P, T1_P, T2_P, false, false>
+inline TernaryOpc_match<T0_P, T1_P, T2_P>
m_SetCC(const T0_P &LHS, const T1_P &RHS, const T2_P &CC) {
- return TernaryOpc_match<T0_P, T1_P, T2_P, false, false>(ISD::SETCC, LHS, RHS,
- CC);
+ return TernaryOpc_match<T0_P, T1_P, T2_P>(ISD::SETCC, LHS, RHS, CC);
}
template <typename T0_P, typename T1_P, typename T2_P>
@@ -529,9 +528,9 @@ struct BinaryOpc_match {
};
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_BinOp(unsigned Opc, const LHS &L,
- const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(Opc, L, R);
+inline BinaryOpc_match<LHS, RHS> m_BinOp(unsigned Opc, const LHS &L,
+ const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(Opc, L, R);
}
template <typename LHS, typename RHS>
inline BinaryOpc_match<LHS, RHS, true> m_c_BinOp(unsigned Opc, const LHS &L,
@@ -557,8 +556,8 @@ inline BinaryOpc_match<LHS, RHS, true> m_Add(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_Sub(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SUB, L, R);
+inline BinaryOpc_match<LHS, RHS> m_Sub(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SUB, L, R);
}
template <typename LHS, typename RHS>
@@ -602,35 +601,35 @@ inline BinaryOpc_match<LHS, RHS, true> m_UMax(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_UDiv(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::UDIV, L, R);
+inline BinaryOpc_match<LHS, RHS> m_UDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::UDIV, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_SDiv(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SDIV, L, R);
+inline BinaryOpc_match<LHS, RHS> m_SDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SDIV, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_URem(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::UREM, L, R);
+inline BinaryOpc_match<LHS, RHS> m_URem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::UREM, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_SRem(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SREM, L, R);
+inline BinaryOpc_match<LHS, RHS> m_SRem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SREM, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_Shl(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SHL, L, R);
+inline BinaryOpc_match<LHS, RHS> m_Shl(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SHL, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_Sra(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SRA, L, R);
+inline BinaryOpc_match<LHS, RHS> m_Sra(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SRA, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_Srl(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::SRL, L, R);
+inline BinaryOpc_match<LHS, RHS> m_Srl(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::SRL, L, R);
}
template <typename LHS, typename RHS>
@@ -639,8 +638,8 @@ inline BinaryOpc_match<LHS, RHS, true> m_FAdd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_FSub(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::FSUB, L, R);
+inline BinaryOpc_match<LHS, RHS> m_FSub(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::FSUB, L, R);
}
template <typename LHS, typename RHS>
@@ -649,13 +648,13 @@ inline BinaryOpc_match<LHS, RHS, true> m_FMul(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_FDiv(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::FDIV, L, R);
+inline BinaryOpc_match<LHS, RHS> m_FDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::FDIV, L, R);
}
template <typename LHS, typename RHS>
-inline BinaryOpc_match<LHS, RHS, false> m_FRem(const LHS &L, const RHS &R) {
- return BinaryOpc_match<LHS, RHS, false>(ISD::FREM, L, R);
+inline BinaryOpc_match<LHS, RHS> m_FRem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS>(ISD::FREM, L, R);
}
// === Unary operations ===
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 6a80c8c..1d0124e 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -2320,6 +2320,11 @@ public:
isConstantFPBuildVectorOrConstantFP(N);
}
+ /// Check if a value \op N is a constant using the target's BooleanContent for
+ /// its type.
+ std::optional<bool> isBoolConstant(SDValue N,
+ bool AllowTruncation = false) const;
+
/// Set CallSiteInfo to be associated with Node.
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo) {
SDEI[Node].CSInfo = std::move(CallInfo);
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 0656c0d..3df9e56 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -15,6 +15,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/Support/TypeSize.h"
#include <vector>
@@ -473,6 +474,15 @@ public:
/// Return the frame base information to be encoded in the DWARF subprogram
/// debug info.
virtual DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const;
+
+ /// If frame pointer or base pointer is clobbered by an instruction, we should
+ /// spill/restore it around that instruction.
+ virtual void spillFPBP(MachineFunction &MF) const {}
+
+ /// This method is called at the end of prolog/epilog code insertion, so
+ /// targets can emit remarks based on the final frame layout.
+ virtual void emitRemarks(const MachineFunction &MF,
+ MachineOptimizationRemarkEmitter *ORE) const {};
};
} // End llvm namespace
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 33c4c74..197f66e 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -630,6 +630,12 @@ public:
return false;
}
+ /// Returns true if RC is a class/subclass of general purpose register.
+ virtual bool
+ isGeneralPurposeRegisterClass(const TargetRegisterClass *RC) const {
+ return false;
+ }
+
/// Prior to adding the live-out mask to a stackmap or patchpoint
/// instruction, provide the target the opportunity to adjust it (mainly to
/// remove pseudo-registers that should be ignored).
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
index c09398f..e8c3e34 100644
--- a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
@@ -233,6 +233,21 @@ enum EdgeKind_aarch64 : Edge::Kind {
/// out-of-range error will be returned.
PageOffset12,
+ /// The 15-bit offset of the GOT entry from the GOT table.
+ ///
+ /// Used for load/store instructions addressing a GOT entry.
+ ///
+ /// Fixup expression:
+ ///
+ ///   Fixup <- ((Target + Addend - Page(GOT)) & 0x7fff) >> 3 : uint12
+ ///
+ /// Errors:
+ /// - The result of the unshifted part of the fixup expression must be
+ /// aligned otherwise an alignment error will be returned.
+ /// - The result of the fixup expression must fit into a uint12 otherwise an
+ /// out-of-range error will be returned.
+ GotPageOffset15,
+
/// A GOT entry getter/constructor, transformed to Page21 pointing at the GOT
/// entry for the original target.
///
@@ -273,6 +288,23 @@ enum EdgeKind_aarch64 : Edge::Kind {
///
RequestGOTAndTransformToPageOffset12,
+ /// A GOT entry getter/constructor, transformed to GotPageOffset15 pointing at
+ /// the GOT entry for the original target.
+ ///
+ /// Indicates that this edge should be transformed into a GotPageOffset15
+ /// targeting the GOT entry for the edge's current target, maintaining the
+ /// same addend. A GOT entry for the target should be created if one does not
+ /// already exist.
+ ///
+ /// Fixup expression:
+ /// NONE
+ ///
+ /// Errors:
+ /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
+ /// phase will result in an assert/unreachable during the fixup phase.
+ ///
+ RequestGOTAndTransformToPageOffset15,
+
/// A GOT entry getter/constructor, transformed to Delta32 pointing at the GOT
/// entry for the original target.
///
@@ -430,7 +462,8 @@ inline unsigned getMoveWide16Shift(uint32_t Instr) {
}
/// Apply fixup expression for edge to block content.
-inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
+inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
+ const Symbol *GOTSymbol) {
using namespace support;
char *BlockWorkingMem = B.getAlreadyMutableContent().data();
@@ -603,6 +636,24 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
*(ulittle32_t *)FixupPtr = FixedInstr;
break;
}
+ case GotPageOffset15: {
+ assert(GOTSymbol && "No GOT section symbol");
+ uint64_t TargetOffset =
+ (E.getTarget().getAddress() + E.getAddend()).getValue() -
+ (GOTSymbol->getAddress().getValue() & ~static_cast<uint64_t>(4096 - 1));
+ if (TargetOffset > 0x7fff)
+ return make_error<JITLinkError>("PAGEOFF15 target is out of range");
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ const unsigned ImmShift = 3;
+ if (TargetOffset & ((1 << ImmShift) - 1))
+ return make_error<JITLinkError>("PAGEOFF15 target is not aligned");
+
+ uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
default:
return make_error<JITLinkError>(
"In graph " + G.getName() + ", section " + B.getSection().getName() +
@@ -701,6 +752,15 @@ public:
"RawInstr isn't a 64-bit LDR immediate");
break;
}
+ case aarch64::RequestGOTAndTransformToPageOffset15: {
+ KindToSet = aarch64::GotPageOffset15;
+ uint32_t RawInstr = *(const support::ulittle32_t *)FixupPtr;
+ (void)RawInstr;
+ assert(E.getAddend() == 0 && "GOTPageOffset15 with non-zero addend");
+ assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
+ "RawInstr isn't a 64-bit LDR immediate");
+ break;
+ }
case aarch64::RequestGOTAndTransformToDelta32: {
KindToSet = aarch64::Delta32;
break;
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 4d13c38..2788751 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -1056,6 +1056,19 @@ public:
return !getAddrDiscriminator()->isNullValue();
}
+ /// A constant value for the address discriminator which has special
+ /// significance to ctors/dtors lowering. Regular address discrimination can't
+ /// be applied for them since uses of llvm.global_{c|d}tors are disallowed
+ /// (see Verifier::visitGlobalVariable) and we can't emit getelementptr
+ /// expressions referencing these special arrays.
+ enum { AddrDiscriminator_CtorsDtors = 1 };
+
+ /// Whether the address uses a special address discriminator.
+ /// These discriminators can't be used in real pointer-auth values; they
+ /// can only be used in "prototype" values that indicate how some real
+ /// schema is supposed to be produced.
+ bool hasSpecialAddressDiscriminator(uint64_t Value) const;
+
/// Check whether an authentication operation with key \p Key and (possibly
/// blended) discriminator \p Discriminator is known to be compatible with
/// this ptrauth signed pointer.
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 1e7fdb5..7caada2 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -1418,6 +1418,20 @@ let TargetPrefix = "nvvm" in {
def int_nvvm_fence_sc_cluster:
Intrinsic<[], [], [IntrNoCallback]>;
+// Proxy fence (uni-directional)
+foreach scope = ["cta", "cluster", "gpu", "sys"] in {
+
+ def int_nvvm_fence_proxy_tensormap_generic_release_ # scope:
+ Intrinsic<[], [], [IntrNoCallback],
+ "llvm.nvvm.fence.proxy.tensormap_generic.release." # scope>;
+
+ def int_nvvm_fence_proxy_tensormap_generic_acquire_ # scope:
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoCallback, IntrArgMemOnly, ImmArg<ArgIndex<1>>],
+ "llvm.nvvm.fence.proxy.tensormap_generic.acquire." # scope>;
+
+}
+
// Async Copy
def int_nvvm_cp_async_mbarrier_arrive :
ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive">,
diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index aff1fc7..6f49ed3 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -632,6 +632,19 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
DefaultAttrsIntrinsic<[llvm_v1i128_ty],[llvm_v1i128_ty],[IntrNoMem]>;
// BCD intrinsics.
+ def int_ppc_cdtbcdd : ClangBuiltin<"__builtin_ppc_cdtbcd">,
+ DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_ppc_cbcdtdd: ClangBuiltin<"__builtin_ppc_cbcdtd">,
+ DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_ppc_addg6sd: ClangBuiltin<"__builtin_ppc_addg6s">,
+ DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_ppc_cdtbcd : ClangBuiltin<"__builtin_cdtbcd">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_ppc_cbcdtd: ClangBuiltin<"__builtin_cbcdtd">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_ppc_addg6s: ClangBuiltin<"__builtin_addg6s">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
def int_ppc_bcdadd : ClangBuiltin<"__builtin_ppc_bcdadd">,
DefaultAttrsIntrinsic<
[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index eb2cb3f..1ab2002 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -6914,66 +6914,178 @@ let TargetPrefix = "x86" in {
let TargetPrefix = "x86" in {
def int_x86_avx10_vminmaxnepbf16128 : ClangBuiltin<"__builtin_ia32_vminmaxnepbf16128">,
- Intrinsic<[llvm_v8bf16_ty], [llvm_v8bf16_ty, llvm_v8bf16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v8bf16_ty, llvm_v8bf16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxnepbf16256 : ClangBuiltin<"__builtin_ia32_vminmaxnepbf16256">,
- Intrinsic<[llvm_v16bf16_ty], [llvm_v16bf16_ty, llvm_v16bf16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v16bf16_ty], [llvm_v16bf16_ty, llvm_v16bf16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxnepbf16512 : ClangBuiltin<"__builtin_ia32_vminmaxnepbf16512">,
- Intrinsic<[llvm_v32bf16_ty], [llvm_v32bf16_ty, llvm_v32bf16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v32bf16_ty], [llvm_v32bf16_ty, llvm_v32bf16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxpd128 : ClangBuiltin<"__builtin_ia32_vminmaxpd128">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxpd128 : ClangBuiltin<"__builtin_ia32_vminmaxpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxpd256 : ClangBuiltin<"__builtin_ia32_vminmaxpd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxpd256_round : ClangBuiltin<"__builtin_ia32_vminmaxpd256_round_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_mask_vminmaxpd_round : ClangBuiltin<"__builtin_ia32_vminmaxpd512_round_mask">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_vminmaxph128 : ClangBuiltin<"__builtin_ia32_vminmaxph128">,
- Intrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxph128 : ClangBuiltin<"__builtin_ia32_vminmaxph128_mask">,
- Intrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty, llvm_i8_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty, llvm_i8_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxph256 : ClangBuiltin<"__builtin_ia32_vminmaxph256">,
- Intrinsic<[llvm_v16f16_ty], [llvm_v16f16_ty, llvm_v16f16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v16f16_ty], [llvm_v16f16_ty, llvm_v16f16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxph256_round : ClangBuiltin<"__builtin_ia32_vminmaxph256_round_mask">,
- Intrinsic<[llvm_v16f16_ty], [llvm_v16f16_ty, llvm_v16f16_ty, llvm_i32_ty, llvm_v16f16_ty, llvm_i16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v16f16_ty], [llvm_v16f16_ty, llvm_v16f16_ty, llvm_i32_ty, llvm_v16f16_ty, llvm_i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxph_round : ClangBuiltin<"__builtin_ia32_vminmaxph512_round_mask">,
- Intrinsic<[llvm_v32f16_ty], [llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty, llvm_v32f16_ty, llvm_i32_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v32f16_ty], [llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty, llvm_v32f16_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_vminmaxps128 : ClangBuiltin<"__builtin_ia32_vminmaxps128">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxps128 : ClangBuiltin<"__builtin_ia32_vminmaxps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_vminmaxps256 : ClangBuiltin<"__builtin_ia32_vminmaxps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx10_mask_vminmaxps256_round : ClangBuiltin<"__builtin_ia32_vminmaxps256_round_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_mask_vminmaxps_round : ClangBuiltin<"__builtin_ia32_vminmaxps512_round_mask">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_mask_vminmaxsd_round : ClangBuiltin<"__builtin_ia32_vminmaxsd_round_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_mask_vminmaxsh_round : ClangBuiltin<"__builtin_ia32_vminmaxsh_round_mask">,
- Intrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v8f16_ty], [llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx10_mask_vminmaxss_round : ClangBuiltin<"__builtin_ia32_vminmaxss_round_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+ DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
}
+
+//===----------------------------------------------------------------------===//
+let TargetPrefix = "x86" in {
+def int_x86_avx10_vcvtnebf162ibs128 : ClangBuiltin<"__builtin_ia32_vcvtnebf162ibs128">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvtnebf162ibs256 : ClangBuiltin<"__builtin_ia32_vcvtnebf162ibs256">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvtnebf162ibs512 : ClangBuiltin<"__builtin_ia32_vcvtnebf162ibs512">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvtnebf162iubs128 : ClangBuiltin<"__builtin_ia32_vcvtnebf162iubs128">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvtnebf162iubs256 : ClangBuiltin<"__builtin_ia32_vcvtnebf162iubs256">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvtnebf162iubs512 : ClangBuiltin<"__builtin_ia32_vcvtnebf162iubs512">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvtph2ibs128 : ClangBuiltin<"__builtin_ia32_vcvtph2ibs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvtph2ibs256 : ClangBuiltin<"__builtin_ia32_vcvtph2ibs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtph2ibs512 : ClangBuiltin<"__builtin_ia32_vcvtph2ibs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtph2iubs128 : ClangBuiltin<"__builtin_ia32_vcvtph2iubs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvtph2iubs256 : ClangBuiltin<"__builtin_ia32_vcvtph2iubs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtph2iubs512 : ClangBuiltin<"__builtin_ia32_vcvtph2iubs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtps2ibs128 : ClangBuiltin<"__builtin_ia32_vcvtps2ibs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvtps2ibs256 : ClangBuiltin<"__builtin_ia32_vcvtps2ibs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtps2ibs512 : ClangBuiltin<"__builtin_ia32_vcvtps2ibs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtps2iubs128 : ClangBuiltin<"__builtin_ia32_vcvtps2iubs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvtps2iubs256 : ClangBuiltin<"__builtin_ia32_vcvtps2iubs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvtps2iubs512 : ClangBuiltin<"__builtin_ia32_vcvtps2iubs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_vcvttnebf162ibs128 : ClangBuiltin<"__builtin_ia32_vcvttnebf162ibs128">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvttnebf162ibs256 : ClangBuiltin<"__builtin_ia32_vcvttnebf162ibs256">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvttnebf162ibs512 : ClangBuiltin<"__builtin_ia32_vcvttnebf162ibs512">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvttnebf162iubs128 : ClangBuiltin<"__builtin_ia32_vcvttnebf162iubs128">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvttnebf162iubs256 : ClangBuiltin<"__builtin_ia32_vcvttnebf162iubs256">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_vcvttnebf162iubs512 : ClangBuiltin<"__builtin_ia32_vcvttnebf162iubs512">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32bf16_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvttph2ibs128 : ClangBuiltin<"__builtin_ia32_vcvttph2ibs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvttph2ibs256 : ClangBuiltin<"__builtin_ia32_vcvttph2ibs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttph2ibs512 : ClangBuiltin<"__builtin_ia32_vcvttph2ibs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttph2iubs128 : ClangBuiltin<"__builtin_ia32_vcvttph2iubs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvttph2iubs256 : ClangBuiltin<"__builtin_ia32_vcvttph2iubs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttph2iubs512 : ClangBuiltin<"__builtin_ia32_vcvttph2iubs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttps2ibs128 : ClangBuiltin<"__builtin_ia32_vcvttps2ibs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvttps2ibs256 : ClangBuiltin<"__builtin_ia32_vcvttps2ibs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttps2ibs512 : ClangBuiltin<"__builtin_ia32_vcvttps2ibs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttps2iubs128 : ClangBuiltin<"__builtin_ia32_vcvttps2iubs128_mask">,
+ DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+def int_x86_avx10_mask_vcvttps2iubs256 : ClangBuiltin<"__builtin_ia32_vcvttps2iubs256_mask">,
+ DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_x86_avx10_mask_vcvttps2iubs512 : ClangBuiltin<"__builtin_ia32_vcvttps2iubs512_mask">,
+ DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+}
diff --git a/llvm/include/llvm/IR/Module.h b/llvm/include/llvm/IR/Module.h
index d2b2fe4..7042a54 100644
--- a/llvm/include/llvm/IR/Module.h
+++ b/llvm/include/llvm/IR/Module.h
@@ -158,11 +158,6 @@ public:
/// converted result in MFB.
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);
- /// Check if the given module flag metadata represents a valid module flag,
- /// and store the flag behavior, the key string and the value metadata.
- static bool isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
- MDString *&Key, Metadata *&Val);
-
struct ModuleFlagEntry {
ModFlagBehavior Behavior;
MDString *Key;
@@ -502,7 +497,7 @@ public:
/// Return the first NamedMDNode in the module with the specified name. This
/// method returns null if a NamedMDNode with the specified name is not found.
- NamedMDNode *getNamedMetadata(const Twine &Name) const;
+ NamedMDNode *getNamedMetadata(StringRef Name) const;
/// Return the named MDNode in the module with the specified name. This method
/// returns a new NamedMDNode if a NamedMDNode with the specified name is not
diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h
index d23d598..c00e425 100644
--- a/llvm/include/llvm/LinkAllPasses.h
+++ b/llvm/include/llvm/LinkAllPasses.h
@@ -119,7 +119,6 @@ namespace {
(void) llvm::createMergeICmpsLegacyPass();
(void) llvm::createExpandLargeDivRemPass();
(void)llvm::createExpandMemCmpLegacyPass();
- (void) llvm::createExpandVectorPredicationPass();
std::string buf;
llvm::raw_string_ostream os(buf);
(void) llvm::createPrintModulePass(os);
diff --git a/llvm/include/llvm/MC/MCELFObjectWriter.h b/llvm/include/llvm/MC/MCELFObjectWriter.h
index 4952e03..3b41808 100644
--- a/llvm/include/llvm/MC/MCELFObjectWriter.h
+++ b/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -99,8 +99,6 @@ public:
virtual void sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs);
- virtual void addTargetSectionFlags(MCContext &Ctx, MCSectionELF &Sec);
-
/// \name Accessors
/// @{
uint8_t getOSABI() const { return OSABI; }
diff --git a/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h b/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
index f9286ace..7fa7c89 100644
--- a/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
+++ b/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
@@ -21,7 +21,7 @@
namespace llvm {
namespace mca {
-class LSUnit;
+class LSUnitBase;
class RegisterFile;
struct StallInfo {
@@ -56,7 +56,7 @@ class InOrderIssueStage final : public Stage {
RegisterFile &PRF;
ResourceManager RM;
CustomBehaviour &CB;
- LSUnit &LSU;
+ LSUnitBase &LSU;
/// Instructions that were issued, but not executed yet.
SmallVector<InstRef, 4> IssuedInst;
@@ -113,7 +113,7 @@ class InOrderIssueStage final : public Stage {
public:
InOrderIssueStage(const MCSubtargetInfo &STI, RegisterFile &PRF,
- CustomBehaviour &CB, LSUnit &LSU);
+ CustomBehaviour &CB, LSUnitBase &LSU);
unsigned getIssueWidth() const;
bool isAvailable(const InstRef &) const override;
diff --git a/llvm/include/llvm/Object/COFF.h b/llvm/include/llvm/Object/COFF.h
index a548b2c..b084754 100644
--- a/llvm/include/llvm/Object/COFF.h
+++ b/llvm/include/llvm/Object/COFF.h
@@ -35,8 +35,10 @@ template <typename T> class ArrayRef;
namespace object {
+class Arm64XRelocRef;
class BaseRelocRef;
class DelayImportDirectoryEntryRef;
+class DynamicRelocRef;
class ExportDirectoryEntryRef;
class ImportDirectoryEntryRef;
class ImportedSymbolRef;
@@ -48,6 +50,8 @@ using delay_import_directory_iterator =
using export_directory_iterator = content_iterator<ExportDirectoryEntryRef>;
using imported_symbol_iterator = content_iterator<ImportedSymbolRef>;
using base_reloc_iterator = content_iterator<BaseRelocRef>;
+using dynamic_reloc_iterator = content_iterator<DynamicRelocRef>;
+using arm64x_reloc_iterator = content_iterator<Arm64XRelocRef>;
/// The DOS compatible header at the front of all PE/COFF executables.
struct dos_header {
@@ -832,6 +836,37 @@ struct debug_h_header {
support::ulittle16_t HashAlgorithm;
};
+struct coff_dynamic_reloc_table {
+ support::ulittle32_t Version;
+ support::ulittle32_t Size;
+};
+
+struct coff_dynamic_relocation32 {
+ support::ulittle32_t Symbol;
+ support::ulittle32_t BaseRelocSize;
+};
+
+struct coff_dynamic_relocation64 {
+ support::ulittle64_t Symbol;
+ support::ulittle32_t BaseRelocSize;
+};
+
+struct coff_dynamic_relocation32_v2 {
+ support::ulittle32_t HeaderSize;
+ support::ulittle32_t FixupInfoSize;
+ support::ulittle32_t Symbol;
+ support::ulittle32_t SymbolGroup;
+ support::ulittle32_t Flags;
+};
+
+struct coff_dynamic_relocation64_v2 {
+ support::ulittle32_t HeaderSize;
+ support::ulittle32_t FixupInfoSize;
+ support::ulittle64_t Symbol;
+ support::ulittle32_t SymbolGroup;
+ support::ulittle32_t Flags;
+};
+
class COFFObjectFile : public ObjectFile {
private:
COFFObjectFile(MemoryBufferRef Object);
@@ -861,6 +896,7 @@ private:
// Either coff_load_configuration32 or coff_load_configuration64.
const void *LoadConfig = nullptr;
const chpe_metadata *CHPEMetadata = nullptr;
+ const coff_dynamic_reloc_table *DynamicRelocTable = nullptr;
Expected<StringRef> getString(uint32_t offset) const;
@@ -880,6 +916,7 @@ private:
Error initDebugDirectoryPtr();
Error initTLSDirectoryPtr();
Error initLoadConfigPtr();
+ Error initDynamicRelocPtr(uint32_t SectionIndex, uint32_t SectionOffset);
public:
static Expected<std::unique_ptr<COFFObjectFile>>
@@ -986,6 +1023,9 @@ public:
}
const chpe_metadata *getCHPEMetadata() const { return CHPEMetadata; }
+ const coff_dynamic_reloc_table *getDynamicRelocTable() const {
+ return DynamicRelocTable;
+ }
StringRef getRelocationTypeName(uint16_t Type) const;
@@ -1054,6 +1094,8 @@ public:
export_directory_iterator export_directory_end() const;
base_reloc_iterator base_reloc_begin() const;
base_reloc_iterator base_reloc_end() const;
+ dynamic_reloc_iterator dynamic_reloc_begin() const;
+ dynamic_reloc_iterator dynamic_reloc_end() const;
const debug_directory *debug_directory_begin() const {
return DebugDirectoryBegin;
}
@@ -1066,6 +1108,7 @@ public:
delay_import_directories() const;
iterator_range<export_directory_iterator> export_directories() const;
iterator_range<base_reloc_iterator> base_relocs() const;
+ iterator_range<dynamic_reloc_iterator> dynamic_relocs() const;
iterator_range<const debug_directory *> debug_directories() const {
return make_range(debug_directory_begin(), debug_directory_end());
}
@@ -1295,6 +1338,62 @@ private:
uint32_t Index;
};
+class DynamicRelocRef {
+public:
+ DynamicRelocRef() = default;
+ DynamicRelocRef(const void *Header, const COFFObjectFile *Owner)
+ : Obj(Owner), Header(reinterpret_cast<const uint8_t *>(Header)) {}
+
+ bool operator==(const DynamicRelocRef &Other) const;
+ void moveNext();
+ uint32_t getType() const;
+ void getContents(ArrayRef<uint8_t> &Ref) const;
+
+ arm64x_reloc_iterator arm64x_reloc_begin() const;
+ arm64x_reloc_iterator arm64x_reloc_end() const;
+ iterator_range<arm64x_reloc_iterator> arm64x_relocs() const;
+
+private:
+ Error validate() const;
+
+ const COFFObjectFile *Obj;
+ const uint8_t *Header;
+
+ friend class COFFObjectFile;
+};
+
+class Arm64XRelocRef {
+public:
+ Arm64XRelocRef() = default;
+ Arm64XRelocRef(const coff_base_reloc_block_header *Header, uint32_t Index = 0)
+ : Header(Header), Index(Index) {}
+
+ bool operator==(const Arm64XRelocRef &Other) const;
+ void moveNext();
+
+ COFF::Arm64XFixupType getType() const {
+ return COFF::Arm64XFixupType((getReloc() >> 12) & 3);
+ }
+ uint32_t getRVA() const { return Header->PageRVA + (getReloc() & 0xfff); }
+ uint8_t getSize() const;
+ uint64_t getValue() const;
+
+private:
+ const support::ulittle16_t &getReloc(uint32_t Offset = 0) const {
+ return reinterpret_cast<const support::ulittle16_t *>(Header +
+ 1)[Index + Offset];
+ }
+
+ uint16_t getArg() const { return getReloc() >> 14; }
+ uint8_t getEntrySize() const;
+ Error validate(const COFFObjectFile *Obj) const;
+
+ const coff_base_reloc_block_header *Header;
+ uint32_t Index;
+
+ friend class DynamicRelocRef;
+};
+
class ResourceSectionRef {
public:
ResourceSectionRef() = default;
diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
index fb7a3c1..81c00a3 100644
--- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
@@ -628,7 +628,7 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addISelPasses(
if (TM.useEmulatedTLS())
addPass(LowerEmuTLSPass());
- addPass(PreISelIntrinsicLoweringPass(TM));
+ addPass(PreISelIntrinsicLoweringPass(&TM));
derived().addIRPasses(addPass);
derived().addCodeGenPrepare(addPass);
diff --git a/llvm/include/llvm/Passes/MachinePassRegistry.def b/llvm/include/llvm/Passes/MachinePassRegistry.def
index e702721..8e669ee 100644
--- a/llvm/include/llvm/Passes/MachinePassRegistry.def
+++ b/llvm/include/llvm/Passes/MachinePassRegistry.def
@@ -53,7 +53,6 @@ FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass(TM))
FUNCTION_PASS("expand-large-fp-convert", ExpandLargeFpConvertPass(TM))
FUNCTION_PASS("expand-memcmp", ExpandMemCmpPass(TM))
FUNCTION_PASS("expand-reductions", ExpandReductionsPass())
-FUNCTION_PASS("expandvp", ExpandVectorPredicationPass())
FUNCTION_PASS("gc-lowering", GCLoweringPass())
FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass(TM))
FUNCTION_PASS("interleaved-access", InterleavedAccessPass(TM))
diff --git a/llvm/include/llvm/ProfileData/PGOCtxProfReader.h b/llvm/include/llvm/ProfileData/PGOCtxProfReader.h
index 28f05e9..190deae 100644
--- a/llvm/include/llvm/ProfileData/PGOCtxProfReader.h
+++ b/llvm/include/llvm/ProfileData/PGOCtxProfReader.h
@@ -24,16 +24,16 @@
#include <vector>
namespace llvm {
-/// The loaded contextual profile, suitable for mutation during IPO passes. We
-/// generally expect a fraction of counters and of callsites to be populated.
-/// We continue to model counters as vectors, but callsites are modeled as a map
-/// of a map. The expectation is that, typically, there is a small number of
-/// indirect targets (usually, 1 for direct calls); but potentially a large
-/// number of callsites, and, as inlining progresses, the callsite count of a
-/// caller will grow.
-class PGOContextualProfile final {
+/// A node (context) in the loaded contextual profile, suitable for mutation
+/// during IPO passes. We generally expect a fraction of counters and
+/// callsites to be populated. We continue to model counters as vectors, but
+/// callsites are modeled as a map of a map. The expectation is that, typically,
+/// there is a small number of indirect targets (usually, 1 for direct calls);
+/// but potentially a large number of callsites, and, as inlining progresses,
+/// the callsite count of a caller will grow.
+class PGOCtxProfContext final {
public:
- using CallTargetMapTy = std::map<GlobalValue::GUID, PGOContextualProfile>;
+ using CallTargetMapTy = std::map<GlobalValue::GUID, PGOCtxProfContext>;
using CallsiteMapTy = DenseMap<uint32_t, CallTargetMapTy>;
private:
@@ -42,19 +42,18 @@ private:
SmallVector<uint64_t, 16> Counters;
CallsiteMapTy Callsites;
- PGOContextualProfile(GlobalValue::GUID G,
- SmallVectorImpl<uint64_t> &&Counters)
+ PGOCtxProfContext(GlobalValue::GUID G, SmallVectorImpl<uint64_t> &&Counters)
: GUID(G), Counters(std::move(Counters)) {}
- Expected<PGOContextualProfile &>
+ Expected<PGOCtxProfContext &>
getOrEmplace(uint32_t Index, GlobalValue::GUID G,
SmallVectorImpl<uint64_t> &&Counters);
public:
- PGOContextualProfile(const PGOContextualProfile &) = delete;
- PGOContextualProfile &operator=(const PGOContextualProfile &) = delete;
- PGOContextualProfile(PGOContextualProfile &&) = default;
- PGOContextualProfile &operator=(PGOContextualProfile &&) = default;
+ PGOCtxProfContext(const PGOCtxProfContext &) = delete;
+ PGOCtxProfContext &operator=(const PGOCtxProfContext &) = delete;
+ PGOCtxProfContext(PGOCtxProfContext &&) = default;
+ PGOCtxProfContext &operator=(PGOCtxProfContext &&) = default;
GlobalValue::GUID guid() const { return GUID; }
const SmallVectorImpl<uint64_t> &counters() const { return Counters; }
@@ -80,7 +79,7 @@ class PGOCtxProfileReader final {
Error wrongValue(const Twine &);
Error unsupported(const Twine &);
- Expected<std::pair<std::optional<uint32_t>, PGOContextualProfile>>
+ Expected<std::pair<std::optional<uint32_t>, PGOCtxProfContext>>
readContext(bool ExpectIndex);
bool canReadContext();
@@ -89,7 +88,7 @@ public:
: Magic(Buffer.substr(0, PGOCtxProfileWriter::ContainerMagic.size())),
Cursor(Buffer.substr(PGOCtxProfileWriter::ContainerMagic.size())) {}
- Expected<std::map<GlobalValue::GUID, PGOContextualProfile>> loadContexts();
+ Expected<std::map<GlobalValue::GUID, PGOCtxProfContext>> loadContexts();
};
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/SandboxIR/SandboxIR.h b/llvm/include/llvm/SandboxIR/SandboxIR.h
index 9e5db20..12ecd30 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIR.h
+++ b/llvm/include/llvm/SandboxIR/SandboxIR.h
@@ -81,6 +81,8 @@
// | +- CastInst
// |
// +- UnaryOperator
+// |
+// +- UnreachableInst
//
// Use
//
@@ -115,6 +117,7 @@ class LoadInst;
class ReturnInst;
class StoreInst;
class User;
+class UnreachableInst;
class Value;
class CallBase;
class CallInst;
@@ -124,6 +127,7 @@ class GetElementPtrInst;
class CastInst;
class PtrToIntInst;
class BitCastInst;
+class AllocaInst;
/// Iterator for the `Use` edges of a User's operands.
/// \Returns the operand `Use` when dereferenced.
@@ -240,8 +244,10 @@ protected:
friend class InvokeInst; // For getting `Val`.
friend class CallBrInst; // For getting `Val`.
friend class GetElementPtrInst; // For getting `Val`.
+ friend class AllocaInst; // For getting `Val`.
friend class CastInst; // For getting `Val`.
friend class PHINode; // For getting `Val`.
+ friend class UnreachableInst; // For getting `Val`.
/// All values point to the context.
Context &Ctx;
@@ -633,8 +639,10 @@ protected:
friend class InvokeInst; // For getTopmostLLVMInstruction().
friend class CallBrInst; // For getTopmostLLVMInstruction().
friend class GetElementPtrInst; // For getTopmostLLVMInstruction().
+ friend class AllocaInst; // For getTopmostLLVMInstruction().
friend class CastInst; // For getTopmostLLVMInstruction().
friend class PHINode; // For getTopmostLLVMInstruction().
+ friend class UnreachableInst; // For getTopmostLLVMInstruction().
/// \Returns the LLVM IR Instructions that this SandboxIR maps to in program
/// order.
@@ -952,6 +960,36 @@ public:
#endif
};
+class UnreachableInst final : public Instruction {
+ /// Use UnreachableInst::create() instead of calling the constructor.
+ UnreachableInst(llvm::UnreachableInst *I, Context &Ctx)
+ : Instruction(ClassID::Unreachable, Opcode::Unreachable, I, Ctx) {}
+ friend Context;
+ Use getOperandUseInternal(unsigned OpIdx, bool Verify) const final {
+ return getOperandUseDefault(OpIdx, Verify);
+ }
+ SmallVector<llvm::Instruction *, 1> getLLVMInstrs() const final {
+ return {cast<llvm::Instruction>(Val)};
+ }
+
+public:
+ static UnreachableInst *create(Instruction *InsertBefore, Context &Ctx);
+ static UnreachableInst *create(BasicBlock *InsertAtEnd, Context &Ctx);
+ static bool classof(const Value *From);
+ unsigned getNumSuccessors() const { return 0; }
+ unsigned getUseOperandNo(const Use &Use) const final {
+ llvm_unreachable("UnreachableInst has no operands!");
+ }
+ unsigned getNumOfIRInstrs() const final { return 1u; }
+#ifndef NDEBUG
+ void verify() const final {
+ assert(isa<llvm::UnreachableInst>(Val) && "Expected UnreachableInst!");
+ }
+ void dump(raw_ostream &OS) const override;
+ LLVM_DUMP_METHOD void dump() const override;
+#endif
+};
+
class ReturnInst final : public Instruction {
/// Use ReturnInst::create() instead of calling the constructor.
ReturnInst(llvm::Instruction *I, Context &Ctx)
@@ -1393,6 +1431,103 @@ public:
#endif
};
+class AllocaInst final : public UnaryInstruction {
+ Use getOperandUseInternal(unsigned OpIdx, bool Verify) const final {
+ return getOperandUseDefault(OpIdx, Verify);
+ }
+ SmallVector<llvm::Instruction *, 1> getLLVMInstrs() const final {
+ return {cast<llvm::Instruction>(Val)};
+ }
+
+ AllocaInst(llvm::AllocaInst *AI, Context &Ctx)
+ : UnaryInstruction(ClassID::Alloca, Instruction::Opcode::Alloca, AI,
+ Ctx) {}
+ friend class Context; // For constructor.
+
+public:
+ static AllocaInst *create(Type *Ty, unsigned AddrSpace, BBIterator WhereIt,
+ BasicBlock *WhereBB, Context &Ctx,
+ Value *ArraySize = nullptr, const Twine &Name = "");
+ static AllocaInst *create(Type *Ty, unsigned AddrSpace,
+ Instruction *InsertBefore, Context &Ctx,
+ Value *ArraySize = nullptr, const Twine &Name = "");
+ static AllocaInst *create(Type *Ty, unsigned AddrSpace,
+ BasicBlock *InsertAtEnd, Context &Ctx,
+ Value *ArraySize = nullptr, const Twine &Name = "");
+
+ unsigned getUseOperandNo(const Use &Use) const final {
+ return getUseOperandNoDefault(Use);
+ }
+ unsigned getNumOfIRInstrs() const final { return 1u; }
+
+ /// Return true if there is an allocation size parameter to the allocation
+ /// instruction that is not 1.
+ bool isArrayAllocation() const {
+ return cast<llvm::AllocaInst>(Val)->isArrayAllocation();
+ }
+ /// Get the number of elements allocated. For a simple allocation of a single
+ /// element, this will return a constant 1 value.
+ Value *getArraySize();
+ const Value *getArraySize() const {
+ return const_cast<AllocaInst *>(this)->getArraySize();
+ }
+ /// Overload to return most specific pointer type.
+ PointerType *getType() const {
+ return cast<llvm::AllocaInst>(Val)->getType();
+ }
+ /// Return the address space for the allocation.
+ unsigned getAddressSpace() const {
+ return cast<llvm::AllocaInst>(Val)->getAddressSpace();
+ }
+ /// Get allocation size in bytes. Returns std::nullopt if size can't be
+ /// determined, e.g. in case of a VLA.
+ std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const {
+ return cast<llvm::AllocaInst>(Val)->getAllocationSize(DL);
+ }
+ /// Get allocation size in bits. Returns std::nullopt if size can't be
+ /// determined, e.g. in case of a VLA.
+ std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const {
+ return cast<llvm::AllocaInst>(Val)->getAllocationSizeInBits(DL);
+ }
+ /// Return the type that is being allocated by the instruction.
+ Type *getAllocatedType() const {
+ return cast<llvm::AllocaInst>(Val)->getAllocatedType();
+ }
+ /// for use only in special circumstances that need to generically
+ /// transform a whole instruction (eg: IR linking and vectorization).
+ void setAllocatedType(Type *Ty);
+ /// Return the alignment of the memory that is being allocated by the
+ /// instruction.
+ Align getAlign() const { return cast<llvm::AllocaInst>(Val)->getAlign(); }
+ void setAlignment(Align Align);
+ /// Return true if this alloca is in the entry block of the function and is a
+ /// constant size. If so, the code generator will fold it into the
+ /// prolog/epilog code, so it is basically free.
+ bool isStaticAlloca() const {
+ return cast<llvm::AllocaInst>(Val)->isStaticAlloca();
+ }
+ /// Return true if this alloca is used as an inalloca argument to a call. Such
+ /// allocas are never considered static even if they are in the entry block.
+ bool isUsedWithInAlloca() const {
+ return cast<llvm::AllocaInst>(Val)->isUsedWithInAlloca();
+ }
+ /// Specify whether this alloca is used to represent the arguments to a call.
+ void setUsedWithInAlloca(bool V);
+
+ static bool classof(const Value *From) {
+ if (auto *I = dyn_cast<Instruction>(From))
+ return I->getSubclassID() == Instruction::ClassID::Alloca;
+ return false;
+ }
+#ifndef NDEBUG
+ void verify() const final {
+ assert(isa<llvm::AllocaInst>(Val) && "Expected AllocaInst!");
+ }
+ void dump(raw_ostream &OS) const override;
+ LLVM_DUMP_METHOD void dump() const override;
+#endif
+};
+
class CastInst : public UnaryInstruction {
static Opcode getCastOpcode(llvm::Instruction::CastOps CastOp) {
switch (CastOp) {
@@ -1607,12 +1742,11 @@ public:
return cast<llvm::PHINode>(Val)->hasConstantOrUndefValue();
}
bool isComplete() const { return cast<llvm::PHINode>(Val)->isComplete(); }
- // TODO: Implement the below functions:
- // void replaceIncomingBlockWith (const BasicBlock *Old, BasicBlock *New);
+ void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New);
+ void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate);
+ // TODO: Implement
// void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
// uint32_t ToIdx = 0)
- // void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate,
- // bool DeletePHIIfEmpty=true)
#ifndef NDEBUG
void verify() const final {
assert(isa<llvm::PHINode>(Val) && "Expected PHINode!");
@@ -1727,10 +1861,14 @@ protected:
friend CallBrInst; // For createCallBrInst()
GetElementPtrInst *createGetElementPtrInst(llvm::GetElementPtrInst *I);
friend GetElementPtrInst; // For createGetElementPtrInst()
+ AllocaInst *createAllocaInst(llvm::AllocaInst *I);
+ friend AllocaInst; // For createAllocaInst()
CastInst *createCastInst(llvm::CastInst *I);
friend CastInst; // For createCastInst()
PHINode *createPHINode(llvm::PHINode *I);
friend PHINode; // For createPHINode()
+ UnreachableInst *createUnreachableInst(llvm::UnreachableInst *UI);
+ friend UnreachableInst; // For createUnreachableInst()
public:
Context(LLVMContext &LLVMCtx)
diff --git a/llvm/include/llvm/SandboxIR/SandboxIRValues.def b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
index 4cb6011..dda629f 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIRValues.def
+++ b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
@@ -43,6 +43,7 @@ DEF_INSTR(Call, OP(Call), CallInst)
DEF_INSTR(Invoke, OP(Invoke), InvokeInst)
DEF_INSTR(CallBr, OP(CallBr), CallBrInst)
DEF_INSTR(GetElementPtr, OP(GetElementPtr), GetElementPtrInst)
+DEF_INSTR(Alloca, OP(Alloca), AllocaInst)
DEF_INSTR(Cast, OPCODES(\
OP(ZExt) \
OP(SExt) \
@@ -59,7 +60,8 @@ DEF_INSTR(Cast, OPCODES(\
OP(AddrSpaceCast) \
), CastInst)
DEF_INSTR(PHI, OP(PHI), PHINode)
-
+DEF_INSTR(Unreachable, OP(Unreachable), UnreachableInst)
+
// clang-format on
#ifdef DEF_VALUE
#undef DEF_VALUE
diff --git a/llvm/include/llvm/SandboxIR/Tracker.h b/llvm/include/llvm/SandboxIR/Tracker.h
index 3c491bf..af6d015 100644
--- a/llvm/include/llvm/SandboxIR/Tracker.h
+++ b/llvm/include/llvm/SandboxIR/Tracker.h
@@ -58,6 +58,7 @@ class LoadInst;
class StoreInst;
class Instruction;
class Tracker;
+class AllocaInst;
/// The base class for IR Change classes.
class IRChangeBase {
@@ -255,6 +256,57 @@ public:
#endif
};
+class AllocaSetAllocatedType final : public IRChangeBase {
+ AllocaInst *Alloca;
+ Type *OrigType;
+
+public:
+ AllocaSetAllocatedType(AllocaInst *Alloca, Tracker &Tracker);
+ void revert() final;
+ void accept() final {}
+#ifndef NDEBUG
+ void dump(raw_ostream &OS) const final {
+ dumpCommon(OS);
+ OS << "AllocaSetAllocatedType";
+ }
+ LLVM_DUMP_METHOD void dump() const final;
+#endif
+};
+
+class AllocaSetAlignment final : public IRChangeBase {
+ AllocaInst *Alloca;
+ Align OrigAlign;
+
+public:
+ AllocaSetAlignment(AllocaInst *Alloca, Tracker &Tracker);
+ void revert() final;
+ void accept() final {}
+#ifndef NDEBUG
+ void dump(raw_ostream &OS) const final {
+ dumpCommon(OS);
+ OS << "AllocaSetAlignment";
+ }
+ LLVM_DUMP_METHOD void dump() const final;
+#endif
+};
+
+class AllocaSetUsedWithInAlloca final : public IRChangeBase {
+ AllocaInst *Alloca;
+ bool Orig;
+
+public:
+ AllocaSetUsedWithInAlloca(AllocaInst *Alloca, Tracker &Tracker);
+ void revert() final;
+ void accept() final {}
+#ifndef NDEBUG
+ void dump(raw_ostream &OS) const final {
+ dumpCommon(OS);
+ OS << "AllocaSetUsedWithInAlloca";
+ }
+ LLVM_DUMP_METHOD void dump() const final;
+#endif
+};
+
class CallBrInstSetIndirectDest : public IRChangeBase {
CallBrInst *CallBr;
unsigned Idx;
diff --git a/llvm/include/llvm/Support/GenericDomTree.h b/llvm/include/llvm/Support/GenericDomTree.h
index e9ebfd4..d7b94d5 100644
--- a/llvm/include/llvm/Support/GenericDomTree.h
+++ b/llvm/include/llvm/Support/GenericDomTree.h
@@ -258,14 +258,17 @@ protected:
// Dominators always have a single root, postdominators can have more.
SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
- using DomTreeNodeMapType =
- DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
- DomTreeNodeMapType DomTreeNodes;
+ using DomTreeNodeStorageTy =
+ SmallVector<std::unique_ptr<DomTreeNodeBase<NodeT>>>;
+ DomTreeNodeStorageTy DomTreeNodes;
+ // For graphs where blocks don't have numbers, create a numbering here.
+ DenseMap<const NodeT *, unsigned> NodeNumberMap;
DomTreeNodeBase<NodeT> *RootNode = nullptr;
ParentPtr Parent = nullptr;
mutable bool DFSInfoValid = false;
mutable unsigned int SlowQueries = 0;
+ unsigned BlockNumberEpoch = 0;
friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
@@ -273,22 +276,22 @@ protected:
DominatorTreeBase() = default;
DominatorTreeBase(DominatorTreeBase &&Arg)
- : Roots(std::move(Arg.Roots)),
- DomTreeNodes(std::move(Arg.DomTreeNodes)),
- RootNode(Arg.RootNode),
- Parent(Arg.Parent),
- DFSInfoValid(Arg.DFSInfoValid),
- SlowQueries(Arg.SlowQueries) {
+ : Roots(std::move(Arg.Roots)), DomTreeNodes(std::move(Arg.DomTreeNodes)),
+ NodeNumberMap(std::move(Arg.NodeNumberMap)), RootNode(Arg.RootNode),
+ Parent(Arg.Parent), DFSInfoValid(Arg.DFSInfoValid),
+ SlowQueries(Arg.SlowQueries), BlockNumberEpoch(Arg.BlockNumberEpoch) {
Arg.wipe();
}
DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
Roots = std::move(RHS.Roots);
DomTreeNodes = std::move(RHS.DomTreeNodes);
+ NodeNumberMap = std::move(RHS.NodeNumberMap);
RootNode = RHS.RootNode;
Parent = RHS.Parent;
DFSInfoValid = RHS.DFSInfoValid;
SlowQueries = RHS.SlowQueries;
+ BlockNumberEpoch = RHS.BlockNumberEpoch;
RHS.wipe();
return *this;
}
@@ -333,35 +336,70 @@ protected:
if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
return true;
- const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
- if (DomTreeNodes.size() != OtherDomTreeNodes.size())
- return true;
-
- for (const auto &DomTreeNode : DomTreeNodes) {
- NodeT *BB = DomTreeNode.first;
- typename DomTreeNodeMapType::const_iterator OI =
- OtherDomTreeNodes.find(BB);
- if (OI == OtherDomTreeNodes.end())
+ size_t NumNodes = 0;
+ // All nodes we have must exist and be equal in the other tree.
+ for (const auto &Node : DomTreeNodes) {
+ if (!Node)
+ continue;
+ if (Node->compare(Other.getNode(Node->getBlock())))
return true;
+ NumNodes++;
+ }
- DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
- DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
+ // If the other tree has more nodes than we have, they're not equal.
+ size_t NumOtherNodes = 0;
+ for (const auto &OtherNode : Other.DomTreeNodes)
+ if (OtherNode)
+ NumOtherNodes++;
+ return NumNodes != NumOtherNodes;
+ }
- if (MyNd.compare(&OtherNd))
- return true;
+private:
+ template <typename T>
+ using has_number_t =
+ decltype(GraphTraits<T *>::getNumber(std::declval<T *>()));
+
+ std::optional<unsigned> getNodeIndex(const NodeT *BB) const {
+ if constexpr (is_detected<has_number_t, NodeT>::value) {
+ // BB can be nullptr, map nullptr to index 0.
+ assert(BlockNumberEpoch ==
+ GraphTraits<ParentPtr>::getNumberEpoch(Parent) &&
+ "dominator tree used with outdated block numbers");
+ return BB ? GraphTraits<const NodeT *>::getNumber(BB) + 1 : 0;
+ } else {
+ if (auto It = NodeNumberMap.find(BB); It != NodeNumberMap.end())
+ return It->second;
+ return std::nullopt;
}
+ }
- return false;
+ unsigned getNodeIndexForInsert(const NodeT *BB) {
+ if constexpr (is_detected<has_number_t, NodeT>::value) {
+ // getNodeIndex will never fail if nodes have getNumber().
+ unsigned Idx = *getNodeIndex(BB);
+ if (Idx >= DomTreeNodes.size()) {
+ unsigned Max = GraphTraits<ParentPtr>::getMaxNumber(Parent);
+ DomTreeNodes.resize(Max > Idx + 1 ? Max : Idx + 1);
+ }
+ return Idx;
+ } else {
+ // We might already have a number stored for BB.
+ unsigned Idx =
+ NodeNumberMap.try_emplace(BB, DomTreeNodes.size()).first->second;
+ if (Idx >= DomTreeNodes.size())
+ DomTreeNodes.resize(Idx + 1);
+ return Idx;
+ }
}
+public:
/// getNode - return the (Post)DominatorTree node for the specified basic
/// block. This is the same as using operator[] on this class. The result
/// may (but is not required to) be null for a forward (backwards)
/// statically unreachable block.
DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
- auto I = DomTreeNodes.find(BB);
- if (I != DomTreeNodes.end())
- return I->second.get();
+ if (auto Idx = getNodeIndex(BB); Idx && *Idx < DomTreeNodes.size())
+ return DomTreeNodes[*Idx].get();
return nullptr;
}
@@ -678,8 +716,10 @@ protected:
/// dominate any other blocks. Removes node from its immediate dominator's
/// children list. Deletes dominator node associated with basic block BB.
void eraseNode(NodeT *BB) {
- DomTreeNodeBase<NodeT> *Node = getNode(BB);
- assert(Node && "Removing node that isn't in dominator tree.");
+ std::optional<unsigned> IdxOpt = getNodeIndex(BB);
+ assert(IdxOpt && DomTreeNodes[*IdxOpt] &&
+ "Removing node that isn't in dominator tree.");
+ DomTreeNodeBase<NodeT> *Node = DomTreeNodes[*IdxOpt].get();
assert(Node->isLeaf() && "Node is not a leaf node.");
DFSInfoValid = false;
@@ -695,7 +735,8 @@ protected:
IDom->Children.pop_back();
}
- DomTreeNodes.erase(BB);
+ DomTreeNodes[*IdxOpt] = nullptr;
+ NodeNumberMap.erase(BB);
if (!IsPostDom) return;
@@ -786,17 +827,48 @@ public:
DFSInfoValid = true;
}
+private:
+ void updateBlockNumberEpoch() {
+ // Nothing to do for graphs that don't number their blocks.
+ if constexpr (is_detected<has_number_t, NodeT>::value)
+ BlockNumberEpoch = GraphTraits<ParentPtr>::getNumberEpoch(Parent);
+ }
+
+public:
/// recalculate - compute a dominator tree for the given function
void recalculate(ParentType &Func) {
Parent = &Func;
+ updateBlockNumberEpoch();
DomTreeBuilder::Calculate(*this);
}
void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
Parent = &Func;
+ updateBlockNumberEpoch();
DomTreeBuilder::CalculateWithUpdates(*this, Updates);
}
+ /// Update dominator tree after renumbering blocks.
+ template <class T_ = NodeT>
+ std::enable_if_t<is_detected<has_number_t, T_>::value, void>
+ updateBlockNumbers() {
+ updateBlockNumberEpoch();
+
+ unsigned MaxNumber = GraphTraits<ParentPtr>::getMaxNumber(Parent);
+ DomTreeNodeStorageTy NewVector;
+ NewVector.resize(MaxNumber + 1); // +1, because index 0 is for nullptr
+ for (auto &Node : DomTreeNodes) {
+ if (!Node)
+ continue;
+ unsigned Idx = *getNodeIndex(Node->getBlock());
+ // getMaxNumber is not necessarily supported
+ if (Idx >= NewVector.size())
+ NewVector.resize(Idx + 1);
+ NewVector[Idx] = std::move(Node);
+ }
+ DomTreeNodes = std::move(NewVector);
+ }
+
/// verify - checks if the tree is correct. There are 3 level of verification:
/// - Full -- verifies if the tree is correct by making sure all the
/// properties (including the parent and the sibling property)
@@ -817,6 +889,7 @@ public:
void reset() {
DomTreeNodes.clear();
+ NodeNumberMap.clear();
Roots.clear();
RootNode = nullptr;
Parent = nullptr;
@@ -831,7 +904,8 @@ protected:
DomTreeNodeBase<NodeT> *IDom = nullptr) {
auto Node = std::make_unique<DomTreeNodeBase<NodeT>>(BB, IDom);
auto *NodePtr = Node.get();
- DomTreeNodes[BB] = std::move(Node);
+ unsigned NodeIdx = getNodeIndexForInsert(BB);
+ DomTreeNodes[NodeIdx] = std::move(Node);
if (IDom)
IDom->addChild(NodePtr);
return NodePtr;
@@ -915,6 +989,7 @@ protected:
/// assignable and destroyable state, but otherwise invalid.
void wipe() {
DomTreeNodes.clear();
+ NodeNumberMap.clear();
RootNode = nullptr;
Parent = nullptr;
}
diff --git a/llvm/include/llvm/Support/GenericDomTreeConstruction.h b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
index af7ac04..9aab5ec 100644
--- a/llvm/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
@@ -73,7 +73,11 @@ struct SemiNCAInfo {
// Number to node mapping is 1-based. Initialize the mapping to start with
// a dummy element.
SmallVector<NodePtr, 64> NumToNode = {nullptr};
- DenseMap<NodePtr, InfoRec> NodeToInfo;
+ // If blocks have numbers (e.g., BasicBlock, MachineBasicBlock), store node
+ // infos in a vector. Otherwise, store them in a map.
+ std::conditional_t<GraphHasNodeNumbers<NodePtr>, SmallVector<InfoRec, 64>,
+ DenseMap<NodePtr, InfoRec>>
+ NodeInfos;
using UpdateT = typename DomTreeT::UpdateType;
using UpdateKind = typename DomTreeT::UpdateKind;
@@ -99,7 +103,7 @@ struct SemiNCAInfo {
void clear() {
NumToNode = {nullptr}; // Restore to initial state with a dummy start node.
- NodeToInfo.clear();
+ NodeInfos.clear();
// Don't reset the pointer to BatchUpdateInfo here -- if there's an update
// in progress, we need this information to continue it.
}
@@ -123,13 +127,25 @@ struct SemiNCAInfo {
return Res;
}
- NodePtr getIDom(NodePtr BB) const {
- auto InfoIt = NodeToInfo.find(BB);
- if (InfoIt == NodeToInfo.end()) return nullptr;
-
- return InfoIt->second.IDom;
+ InfoRec &getNodeInfo(NodePtr BB) {
+ if constexpr (GraphHasNodeNumbers<NodePtr>) {
+ unsigned Idx = BB ? GraphTraits<NodePtr>::getNumber(BB) + 1 : 0;
+ if (Idx >= NodeInfos.size()) {
+ unsigned Max = 0;
+ if (BB)
+ Max = GraphTraits<decltype(BB->getParent())>::getMaxNumber(
+ BB->getParent());
+ // Max might be zero, graphs might not support getMaxNumber().
+ NodeInfos.resize(Max ? Max + 1 : Idx + 1);
+ }
+ return NodeInfos[Idx];
+ } else {
+ return NodeInfos[BB];
+ }
}
+ NodePtr getIDom(NodePtr BB) { return getNodeInfo(BB).IDom; }
+
TreeNodePtr getNodeForBlock(NodePtr BB, DomTreeT &DT) {
if (TreeNodePtr Node = DT.getNode(BB)) return Node;
@@ -181,11 +197,11 @@ struct SemiNCAInfo {
const NodeOrderMap *SuccOrder = nullptr) {
assert(V);
SmallVector<std::pair<NodePtr, unsigned>, 64> WorkList = {{V, AttachToNum}};
- NodeToInfo[V].Parent = AttachToNum;
+ getNodeInfo(V).Parent = AttachToNum;
while (!WorkList.empty()) {
const auto [BB, ParentNum] = WorkList.pop_back_val();
- auto &BBInfo = NodeToInfo[BB];
+ auto &BBInfo = getNodeInfo(BB);
BBInfo.ReverseChildren.push_back(ParentNum);
// Visited nodes always have positive DFS numbers.
@@ -264,7 +280,7 @@ struct SemiNCAInfo {
// Initialize IDoms to spanning tree parents.
for (unsigned i = 1; i < NextDFSNum; ++i) {
const NodePtr V = NumToNode[i];
- auto &VInfo = NodeToInfo[V];
+ auto &VInfo = getNodeInfo(V);
VInfo.IDom = NumToNode[VInfo.Parent];
NumToInfo.push_back(&VInfo);
}
@@ -292,7 +308,7 @@ struct SemiNCAInfo {
const unsigned SDomNum = NumToInfo[WInfo.Semi]->DFSNum;
NodePtr WIDomCandidate = WInfo.IDom;
while (true) {
- auto &WIDomCandidateInfo = NodeToInfo.find(WIDomCandidate)->second;
+ auto &WIDomCandidateInfo = getNodeInfo(WIDomCandidate);
if (WIDomCandidateInfo.DFSNum <= SDomNum)
break;
WIDomCandidate = WIDomCandidateInfo.IDom;
@@ -311,7 +327,7 @@ struct SemiNCAInfo {
assert(IsPostDom && "Only postdominators have a virtual root");
assert(NumToNode.size() == 1 && "SNCAInfo must be freshly constructed");
- auto &BBInfo = NodeToInfo[nullptr];
+ auto &BBInfo = getNodeInfo(nullptr);
BBInfo.DFSNum = BBInfo.Semi = BBInfo.Label = 1;
NumToNode.push_back(nullptr); // NumToNode[1] = nullptr;
@@ -393,7 +409,7 @@ struct SemiNCAInfo {
auto InitSuccOrderOnce = [&]() {
SuccOrder = NodeOrderMap();
for (const auto Node : nodes(DT.Parent))
- if (SNCA.NodeToInfo.count(Node) == 0)
+ if (SNCA.getNodeInfo(Node).DFSNum == 0)
for (const auto Succ : getChildren<false>(Node, SNCA.BatchUpdates))
SuccOrder->try_emplace(Succ, 0);
@@ -417,7 +433,7 @@ struct SemiNCAInfo {
// unreachable node once, we may just visit it in two directions,
// depending on how lucky we get.
for (const NodePtr I : nodes(DT.Parent)) {
- if (SNCA.NodeToInfo.count(I) == 0) {
+ if (SNCA.getNodeInfo(I).DFSNum == 0) {
LLVM_DEBUG(dbgs()
<< "\t\t\tVisiting node " << BlockNamePrinter(I) << "\n");
// Find the furthest away we can get by following successors, then
@@ -449,7 +465,7 @@ struct SemiNCAInfo {
const NodePtr N = SNCA.NumToNode[i];
LLVM_DEBUG(dbgs() << "\t\t\t\tRemoving DFS info for "
<< BlockNamePrinter(N) << "\n");
- SNCA.NodeToInfo.erase(N);
+ SNCA.getNodeInfo(N) = {};
SNCA.NumToNode.pop_back();
}
const unsigned PrevNum = Num;
@@ -582,7 +598,7 @@ struct SemiNCAInfo {
void attachNewSubtree(DomTreeT& DT, const TreeNodePtr AttachTo) {
// Attach the first unreachable block to AttachTo.
- NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
+ getNodeInfo(NumToNode[1]).IDom = AttachTo->getBlock();
// Loop over all of the discovered blocks in the function...
for (NodePtr W : llvm::drop_begin(NumToNode)) {
if (DT.getNode(W))
@@ -600,11 +616,11 @@ struct SemiNCAInfo {
}
void reattachExistingSubtree(DomTreeT &DT, const TreeNodePtr AttachTo) {
- NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
+ getNodeInfo(NumToNode[1]).IDom = AttachTo->getBlock();
for (const NodePtr N : llvm::drop_begin(NumToNode)) {
const TreeNodePtr TN = DT.getNode(N);
assert(TN);
- const TreeNodePtr NewIDom = DT.getNode(NodeToInfo[N].IDom);
+ const TreeNodePtr NewIDom = DT.getNode(getNodeInfo(N).IDom);
TN->setIDom(NewIDom);
}
}
@@ -1231,13 +1247,15 @@ struct SemiNCAInfo {
doFullDFSWalk(DT, AlwaysDescend);
for (auto &NodeToTN : DT.DomTreeNodes) {
- const TreeNodePtr TN = NodeToTN.second.get();
+ const TreeNodePtr TN = NodeToTN.get();
+ if (!TN)
+ continue;
const NodePtr BB = TN->getBlock();
// Virtual root has a corresponding virtual CFG node.
if (DT.isVirtualRoot(TN)) continue;
- if (NodeToInfo.count(BB) == 0) {
+ if (getNodeInfo(BB).DFSNum == 0) {
errs() << "DomTree node " << BlockNamePrinter(BB)
<< " not found by DFS walk!\n";
errs().flush();
@@ -1264,7 +1282,9 @@ struct SemiNCAInfo {
// Running time: O(N).
static bool VerifyLevels(const DomTreeT &DT) {
for (auto &NodeToTN : DT.DomTreeNodes) {
- const TreeNodePtr TN = NodeToTN.second.get();
+ const TreeNodePtr TN = NodeToTN.get();
+ if (!TN)
+ continue;
const NodePtr BB = TN->getBlock();
if (!BB) continue;
@@ -1319,7 +1339,9 @@ struct SemiNCAInfo {
// For each tree node verify if children's DFS numbers cover their parent's
// DFS numbers with no gaps.
for (const auto &NodeToTN : DT.DomTreeNodes) {
- const TreeNodePtr Node = NodeToTN.second.get();
+ const TreeNodePtr Node = NodeToTN.get();
+ if (!Node)
+ continue;
// Handle tree leaves.
if (Node->isLeaf()) {
@@ -1432,7 +1454,9 @@ struct SemiNCAInfo {
// the nodes it dominated previously will now become unreachable.
bool verifyParentProperty(const DomTreeT &DT) {
for (auto &NodeToTN : DT.DomTreeNodes) {
- const TreeNodePtr TN = NodeToTN.second.get();
+ const TreeNodePtr TN = NodeToTN.get();
+ if (!TN)
+ continue;
const NodePtr BB = TN->getBlock();
if (!BB || TN->isLeaf())
continue;
@@ -1445,7 +1469,7 @@ struct SemiNCAInfo {
});
for (TreeNodePtr Child : TN->children())
- if (NodeToInfo.count(Child->getBlock()) != 0) {
+ if (getNodeInfo(Child->getBlock()).DFSNum != 0) {
errs() << "Child " << BlockNamePrinter(Child)
<< " reachable after its parent " << BlockNamePrinter(BB)
<< " is removed!\n";
@@ -1466,7 +1490,9 @@ struct SemiNCAInfo {
// siblings will now still be reachable.
bool verifySiblingProperty(const DomTreeT &DT) {
for (auto &NodeToTN : DT.DomTreeNodes) {
- const TreeNodePtr TN = NodeToTN.second.get();
+ const TreeNodePtr TN = NodeToTN.get();
+ if (!TN)
+ continue;
const NodePtr BB = TN->getBlock();
if (!BB || TN->isLeaf())
continue;
@@ -1481,7 +1507,7 @@ struct SemiNCAInfo {
for (const TreeNodePtr S : TN->children()) {
if (S == N) continue;
- if (NodeToInfo.count(S->getBlock()) == 0) {
+ if (getNodeInfo(S->getBlock()).DFSNum == 0) {
errs() << "Node " << BlockNamePrinter(S)
<< " not reachable when its sibling " << BlockNamePrinter(N)
<< " is removed!\n";
diff --git a/llvm/include/llvm/Support/Windows/WindowsSupport.h b/llvm/include/llvm/Support/Windows/WindowsSupport.h
index d3aacd14..6f5aae2 100644
--- a/llvm/include/llvm/Support/Windows/WindowsSupport.h
+++ b/llvm/include/llvm/Support/Windows/WindowsSupport.h
@@ -23,11 +23,9 @@
// mingw-w64 tends to define it as 0x0502 in its headers.
#undef _WIN32_WINNT
-#undef _WIN32_IE
// Require at least Windows 7 API.
#define _WIN32_WINNT 0x0601
-#define _WIN32_IE 0x0800 // MinGW at it again. FIXME: verify if still needed.
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 46044aa..fd47ddd 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -1748,6 +1748,8 @@ multiclass binary_atomic_op_fp<SDNode atomic_op> {
let IsAtomic = true;
let MemoryVT = vt;
}
+
+ defm NAME#_#vt : binary_atomic_op_ord;
}
}
diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index ebd92f2..e504128 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -88,8 +88,6 @@ public:
xtensa, // Tensilica: Xtensa
nvptx, // NVPTX: 32-bit
nvptx64, // NVPTX: 64-bit
- le32, // le32: generic little-endian 32-bit CPU (PNaCl)
- le64, // le64: generic little-endian 64-bit CPU (PNaCl)
amdil, // AMDIL
amdil64, // AMDIL with 64-bit pointers
hsail, // AMD HSAIL
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index e474899..7bfb23e 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -337,6 +337,10 @@ struct CastedValue {
assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
"Incompatible bit width");
if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
+ if (IsNonNegative && !N.isAllNonNegative())
+ N = N.intersectWith(
+ ConstantRange(APInt::getZero(N.getBitWidth()),
+ APInt::getSignedMinValue(N.getBitWidth())));
if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
return N;
@@ -693,15 +697,17 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
// If the integer type is smaller than the index size, it is implicitly
// sign extended or truncated to index size.
+ bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
+ bool NonNeg = NUSW && GEPOp->hasNoUnsignedWrap();
unsigned Width = Index->getType()->getIntegerBitWidth();
unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
LinearExpression LE = GetLinearExpression(
- CastedValue(Index, 0, SExtBits, TruncBits, false), DL, 0, AC, DT);
+ CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);
// Scale by the type size.
unsigned TypeSize = AllocTypeSize.getFixedValue();
- LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
+ LE = LE.mul(APInt(IndexSize, TypeSize), NUSW);
Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
APInt Scale = LE.Scale.sext(MaxIndexSize);
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 997bb7a..2cb3547 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -46,6 +46,7 @@ add_llvm_component_library(LLVMAnalysis
CostModel.cpp
CodeMetrics.cpp
ConstantFolding.cpp
+ CtxProfAnalysis.cpp
CycleAnalysis.cpp
DDG.cpp
DDGPrinter.cpp
diff --git a/llvm/lib/Analysis/CtxProfAnalysis.cpp b/llvm/lib/Analysis/CtxProfAnalysis.cpp
new file mode 100644
index 0000000..f56f910
--- /dev/null
+++ b/llvm/lib/Analysis/CtxProfAnalysis.cpp
@@ -0,0 +1,95 @@
+//===- CtxProfAnalysis.cpp - contextual profile analysis ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the contextual profile analysis, which maintains contextual
+// profiling info through IPO passes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CtxProfAnalysis.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Analysis.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/ProfileData/PGOCtxProfReader.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#define DEBUG_TYPE "ctx_prof"
+
+namespace llvm {
+namespace json {
+Value toJSON(const PGOCtxProfContext &P) {
+ Object Ret;
+ Ret["Guid"] = P.guid();
+ Ret["Counters"] = Array(P.counters());
+ if (P.callsites().empty())
+ return Ret;
+ auto AllCS =
+ ::llvm::map_range(P.callsites(), [](const auto &P) { return P.first; });
+ auto MaxIt = ::llvm::max_element(AllCS);
+ assert(MaxIt != AllCS.end() && "We should have a max value because the "
+ "callsites collection is not empty.");
+ Array CSites;
+ // Iterate to, and including, the maximum index.
+ for (auto I = 0U, Max = *MaxIt; I <= Max; ++I) {
+ CSites.push_back(Array());
+ Array &Targets = *CSites.back().getAsArray();
+ if (P.hasCallsite(I))
+ for (const auto &[_, Ctx] : P.callsite(I))
+ Targets.push_back(toJSON(Ctx));
+ }
+ Ret["Callsites"] = std::move(CSites);
+
+ return Ret;
+}
+
+Value toJSON(const PGOCtxProfContext::CallTargetMapTy &P) {
+ Array Ret;
+ for (const auto &[_, Ctx] : P)
+ Ret.push_back(toJSON(Ctx));
+ return Ret;
+}
+} // namespace json
+} // namespace llvm
+
+using namespace llvm;
+
+AnalysisKey CtxProfAnalysis::Key;
+
+CtxProfAnalysis::Result CtxProfAnalysis::run(Module &M,
+ ModuleAnalysisManager &MAM) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MB = MemoryBuffer::getFile(Profile);
+ if (auto EC = MB.getError()) {
+ M.getContext().emitError("could not open contextual profile file: " +
+ EC.message());
+ return {};
+ }
+ PGOCtxProfileReader Reader(MB.get()->getBuffer());
+ auto MaybeCtx = Reader.loadContexts();
+ if (!MaybeCtx) {
+ M.getContext().emitError("contextual profile file is invalid: " +
+ toString(MaybeCtx.takeError()));
+ return {};
+ }
+ return Result(std::move(*MaybeCtx));
+}
+
+PreservedAnalyses CtxProfAnalysisPrinterPass::run(Module &M,
+ ModuleAnalysisManager &MAM) {
+ CtxProfAnalysis::Result &C = MAM.getResult<CtxProfAnalysis>(M);
+ if (!C) {
+ M.getContext().emitError("Invalid CtxProfAnalysis");
+ return PreservedAnalyses::all();
+ }
+ const auto JSONed = ::llvm::json::toJSON(C.profiles());
+
+ OS << formatv("{0:2}", JSONed);
+ OS << "\n";
+ return PreservedAnalyses::all();
+}
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 12a3193..c4c1749 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5181,6 +5181,10 @@ Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
(Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
return Vec;
+ // Inserting the splatted value into a constant splat does nothing.
+ if (VecC && ValC && VecC->getSplatValue() == ValC)
+ return Vec;
+
// If we are extracting a value from a vector, then inserting it into the same
// place, that's the input vector:
// insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 5b9a7b0..b26df6e 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -512,6 +512,10 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
TLI.setUnavailable(LibFunc_Znam12__hot_cold_t);
TLI.setUnavailable(LibFunc_ZnamSt11align_val_t12__hot_cold_t);
TLI.setUnavailable(LibFunc_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t);
+ TLI.setUnavailable(LibFunc_size_returning_new);
+ TLI.setUnavailable(LibFunc_size_returning_new_hot_cold);
+ TLI.setUnavailable(LibFunc_size_returning_new_aligned);
+ TLI.setUnavailable(LibFunc_size_returning_new_aligned_hot_cold);
} else {
// Not MSVC, assume it's Itanium.
TLI.setUnavailable(LibFunc_msvc_new_int);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2b7611c..202eaad 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -9108,6 +9108,26 @@ static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
if (L0 == R0 && L1 == R1)
return isImpliedCondMatchingOperands(LPred, RPred);
+ // It only really makes sense in the context of signed comparison for "X - Y
+ // must be positive if X >= Y and no overflow".
+ // Take SGT as an example: L0:x > L1:y and C >= 0
+ // ==> R0:(x -nsw y) < R1:(-C) is false
+ if ((LPred == ICmpInst::ICMP_SGT || LPred == ICmpInst::ICMP_SGE) &&
+ match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
+ if (match(R1, m_NonPositive()) &&
+ isImpliedCondMatchingOperands(LPred, RPred) == false)
+ return false;
+ }
+
+ // Take SLT as an example: L0:x < L1:y and C <= 0
+ // ==> R0:(x -nsw y) < R1:(-C) is true
+ if ((LPred == ICmpInst::ICMP_SLT || LPred == ICmpInst::ICMP_SLE) &&
+ match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
+ if (match(R1, m_NonNegative()) &&
+ isImpliedCondMatchingOperands(LPred, RPred) == true)
+ return true;
+ }
+
// L0 = R0 = L1 + R1, L0 >=u L1 implies R0 >=u R1, L0 <u L1 implies R0 <u R1
if (L0 == R0 &&
(LPred == ICmpInst::ICMP_ULT || LPred == ICmpInst::ICMP_UGE) &&
diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp
index 09e45ea..0071284 100644
--- a/llvm/lib/CodeGen/BasicBlockSections.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSections.cpp
@@ -72,8 +72,10 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/BasicBlockSectionUtils.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/InitializePasses.h"
@@ -393,12 +395,21 @@ bool BasicBlockSections::runOnMachineFunction(MachineFunction &MF) {
auto R1 = handleBBSections(MF);
// Handle basic block address map after basic block sections are finalized.
auto R2 = handleBBAddrMap(MF);
+
+ // We renumber blocks, so update the dominator tree we want to preserve.
+ if (auto *WP = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>())
+ WP->getDomTree().updateBlockNumbers();
+ if (auto *WP = getAnalysisIfAvailable<MachinePostDominatorTreeWrapperPass>())
+ WP->getPostDomTree().updateBlockNumbers();
+
return R1 || R2;
}
void BasicBlockSections::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<BasicBlockSectionsProfileReaderWrapperPass>();
+ AU.addUsedIfAvailable<MachineDominatorTreeWrapperPass>();
+ AU.addUsedIfAvailable<MachinePostDominatorTreeWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/llvm/lib/CodeGen/CFIInstrInserter.cpp b/llvm/lib/CodeGen/CFIInstrInserter.cpp
index 06de925..f5bedc7 100644
--- a/llvm/lib/CodeGen/CFIInstrInserter.cpp
+++ b/llvm/lib/CodeGen/CFIInstrInserter.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
using namespace llvm;
@@ -184,6 +185,10 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) {
unsigned NumRegs = TRI.getNumSupportedRegs(*MF);
BitVector CSRSaved(NumRegs), CSRRestored(NumRegs);
+#ifndef NDEBUG
+ int RememberState = 0;
+#endif
+
// Determine cfa offset and register set by the block.
for (MachineInstr &MI : *MBBInfo.MBB) {
if (MI.isCFIInstruction()) {
@@ -228,17 +233,25 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) {
case MCCFIInstruction::OpRememberState:
// TODO: Add support for handling cfi_remember_state.
#ifndef NDEBUG
- report_fatal_error(
- "Support for cfi_remember_state not implemented! Value of CFA "
- "may be incorrect!\n");
+ // Currently we need cfi_remember_state and cfi_restore_state to be in
+ // the same BB, so it will not impact outgoing CFA.
+ ++RememberState;
+ if (RememberState != 1)
+ MF->getContext().reportError(
+ SMLoc(),
+ "Support for cfi_remember_state not implemented! Value of CFA "
+ "may be incorrect!\n");
#endif
break;
case MCCFIInstruction::OpRestoreState:
// TODO: Add support for handling cfi_restore_state.
#ifndef NDEBUG
- report_fatal_error(
- "Support for cfi_restore_state not implemented! Value of CFA may "
- "be incorrect!\n");
+ --RememberState;
+ if (RememberState != 0)
+ MF->getContext().reportError(
+ SMLoc(),
+ "Support for cfi_restore_state not implemented! Value of CFA may "
+ "be incorrect!\n");
#endif
break;
// Other CFI directives do not affect CFA value.
@@ -264,6 +277,14 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) {
}
}
+#ifndef NDEBUG
+ if (RememberState != 0)
+ MF->getContext().reportError(
+ SMLoc(),
+ "Support for cfi_remember_state not implemented! Value of CFA may be "
+ "incorrect!\n");
+#endif
+
MBBInfo.Processed = true;
// Update outgoing CFA info.
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 97c6ee4..5ffdbcd 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass implements IR expansion for vector predication intrinsics, allowing
+// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//
@@ -16,7 +16,6 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
@@ -24,8 +23,6 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/InitializePasses.h"
-#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -137,7 +134,6 @@ namespace {
// Expansion pass state at function scope.
struct CachingVPExpander {
- Function &F;
const TargetTransformInfo &TTI;
/// \returns A (fixed length) vector with ascending integer indices
@@ -207,10 +203,10 @@ struct CachingVPExpander {
bool UsingTTIOverrides;
public:
- CachingVPExpander(Function &F, const TargetTransformInfo &TTI)
- : F(F), TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}
+ CachingVPExpander(const TargetTransformInfo &TTI)
+ : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}
- bool expandVectorPredication();
+ bool expandVectorPredication(VPIntrinsic &VPI);
};
//// CachingVPExpander {
@@ -571,7 +567,7 @@ CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
VPIntrinsic &VPI) {
assert(VPI.canIgnoreVectorLengthParam());
- const auto &DL = F.getDataLayout();
+ const auto &DL = VPI.getDataLayout();
Value *MaskParam = VPI.getMaskParam();
Value *PtrParam = VPI.getMemoryPointerParam();
@@ -775,15 +771,6 @@ Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
//// } CachingVPExpander
-struct TransformJob {
- VPIntrinsic *PI;
- TargetTransformInfo::VPLegalization Strategy;
- TransformJob(VPIntrinsic *PI, TargetTransformInfo::VPLegalization InitStrat)
- : PI(PI), Strategy(InitStrat) {}
-
- bool isDone() const { return Strategy.shouldDoNothing(); }
-};
-
void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
// Operations with speculatable lanes do not strictly need predication.
if (maySpeculateLanes(VPI)) {
@@ -821,98 +808,43 @@ CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
}
/// Expand llvm.vp.* intrinsics as requested by \p TTI.
-bool CachingVPExpander::expandVectorPredication() {
- SmallVector<TransformJob, 16> Worklist;
-
- // Collect all VPIntrinsics that need expansion and determine their expansion
- // strategy.
- for (auto &I : instructions(F)) {
- auto *VPI = dyn_cast<VPIntrinsic>(&I);
- if (!VPI)
- continue;
- auto VPStrat = getVPLegalizationStrategy(*VPI);
- sanitizeStrategy(*VPI, VPStrat);
- if (!VPStrat.shouldDoNothing())
- Worklist.emplace_back(VPI, VPStrat);
- }
- if (Worklist.empty())
- return false;
+bool CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
+ auto Strategy = getVPLegalizationStrategy(VPI);
+ sanitizeStrategy(VPI, Strategy);
- // Transform all VPIntrinsics on the worklist.
- LLVM_DEBUG(dbgs() << "\n:::: Transforming " << Worklist.size()
- << " instructions ::::\n");
- for (TransformJob Job : Worklist) {
- // Transform the EVL parameter.
- switch (Job.Strategy.EVLParamStrategy) {
- case VPLegalization::Legal:
- break;
- case VPLegalization::Discard:
- discardEVLParameter(*Job.PI);
- break;
- case VPLegalization::Convert:
- if (foldEVLIntoMask(*Job.PI))
- ++NumFoldedVL;
- break;
- }
- Job.Strategy.EVLParamStrategy = VPLegalization::Legal;
+ // Transform the EVL parameter.
+ switch (Strategy.EVLParamStrategy) {
+ case VPLegalization::Legal:
+ break;
+ case VPLegalization::Discard:
+ discardEVLParameter(VPI);
+ break;
+ case VPLegalization::Convert:
+ if (foldEVLIntoMask(VPI))
+ ++NumFoldedVL;
+ break;
+ }
- // Replace with a non-predicated operation.
- switch (Job.Strategy.OpStrategy) {
- case VPLegalization::Legal:
- break;
- case VPLegalization::Discard:
- llvm_unreachable("Invalid strategy for operators.");
- case VPLegalization::Convert:
- expandPredication(*Job.PI);
+ // Replace with a non-predicated operation.
+ switch (Strategy.OpStrategy) {
+ case VPLegalization::Legal:
+ break;
+ case VPLegalization::Discard:
+ llvm_unreachable("Invalid strategy for operators.");
+ case VPLegalization::Convert:
+ if (Value *V = expandPredication(VPI); V != &VPI) {
++NumLoweredVPOps;
- break;
+ // Return true if and only if the intrinsic was actually removed.
+ return true;
}
- Job.Strategy.OpStrategy = VPLegalization::Legal;
-
- assert(Job.isDone() && "incomplete transformation");
+ break;
}
- return true;
+ return false;
}
-class ExpandVectorPredication : public FunctionPass {
-public:
- static char ID;
- ExpandVectorPredication() : FunctionPass(ID) {
- initializeExpandVectorPredicationPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnFunction(Function &F) override {
- const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- CachingVPExpander VPExpander(F, *TTI);
- return VPExpander.expandVectorPredication();
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.setPreservesCFG();
- }
-};
} // namespace
-char ExpandVectorPredication::ID;
-INITIALIZE_PASS_BEGIN(ExpandVectorPredication, "expandvp",
- "Expand vector predication intrinsics", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(ExpandVectorPredication, "expandvp",
- "Expand vector predication intrinsics", false, false)
-
-FunctionPass *llvm::createExpandVectorPredicationPass() {
- return new ExpandVectorPredication();
-}
-
-PreservedAnalyses
-ExpandVectorPredicationPass::run(Function &F, FunctionAnalysisManager &AM) {
- const auto &TTI = AM.getResult<TargetIRAnalysis>(F);
- CachingVPExpander VPExpander(F, TTI);
- if (!VPExpander.expandVectorPredication())
- return PreservedAnalyses::all();
- PreservedAnalyses PA;
- PA.preserveSet<CFGAnalyses>();
- return PA;
+bool llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
+ const TargetTransformInfo &TTI) {
+ return CachingVPExpander(TTI).expandVectorPredication(VPI);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 68a8a27..0169a0e 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -287,7 +287,7 @@ Align IRTranslator::getMemOpAlign(const Instruction &I) {
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
- MachineBasicBlock *&MBB = BBToMBB[&BB];
+ MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
assert(MBB && "BasicBlock was not encountered before");
return *MBB;
}
@@ -3907,8 +3907,9 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
bool HasMustTailInVarArgFn = false;
// Create all blocks, in IR order, to preserve the layout.
+ FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
for (const BasicBlock &BB: F) {
- auto *&MBB = BBToMBB[&BB];
+ auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
MBB = MF->CreateMachineBasicBlock(&BB);
MF->push_back(MBB);
diff --git a/llvm/lib/CodeGen/IntrinsicLowering.cpp b/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 45fba43..256c081 100644
--- a/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -449,7 +449,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::invariant_start:
case Intrinsic::lifetime_start:
// Discard region information.
- CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
+ CI->replaceAllUsesWith(PoisonValue::get(CI->getType()));
break;
case Intrinsic::invariant_end:
case Intrinsic::lifetime_end:
diff --git a/llvm/lib/CodeGen/MIRSampleProfile.cpp b/llvm/lib/CodeGen/MIRSampleProfile.cpp
index ce82f28..23db09b 100644
--- a/llvm/lib/CodeGen/MIRSampleProfile.cpp
+++ b/llvm/lib/CodeGen/MIRSampleProfile.cpp
@@ -126,8 +126,10 @@ template <> struct IRTraits<MachineBasicBlock> {
using PostDominatorTreeT = MachinePostDominatorTree;
using OptRemarkEmitterT = MachineOptimizationRemarkEmitter;
using OptRemarkAnalysisT = MachineOptimizationRemarkAnalysis;
- using PredRangeT = iterator_range<std::vector<MachineBasicBlock *>::iterator>;
- using SuccRangeT = iterator_range<std::vector<MachineBasicBlock *>::iterator>;
+ using PredRangeT =
+ iterator_range<SmallVectorImpl<MachineBasicBlock *>::iterator>;
+ using SuccRangeT =
+ iterator_range<SmallVectorImpl<MachineBasicBlock *>::iterator>;
static Function &getFunction(MachineFunction &F) { return F.getFunction(); }
static const MachineBasicBlock *getEntryBB(const MachineFunction *F) {
return GraphTraits<const MachineFunction *>::getEntryNode(F);
@@ -364,13 +366,18 @@ bool MIRProfileLoaderPass::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "MIRProfileLoader pass working on Func: "
<< MF.getFunction().getName() << "\n");
MBFI = &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI();
+ auto *MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
+ auto *MPDT =
+ &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
+
+ MF.RenumberBlocks();
+ MDT->updateBlockNumbers();
+ MPDT->updateBlockNumbers();
+
MIRSampleLoader->setInitVals(
- &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree(),
- &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree(),
- &getAnalysis<MachineLoopInfoWrapperPass>().getLI(), MBFI,
+ MDT, MPDT, &getAnalysis<MachineLoopInfoWrapperPass>().getLI(), MBFI,
&getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE());
- MF.RenumberBlocks();
if (ViewBFIBefore && ViewBlockLayoutWithBFI != GVDT_None &&
(ViewBlockFreqFuncName.empty() ||
MF.getFunction().getName() == ViewBlockFreqFuncName)) {
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 6179e09..9010c3c 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -3649,6 +3649,7 @@ void MachineBlockPlacement::assignBlockOrder(
const std::vector<const MachineBasicBlock *> &NewBlockOrder) {
assert(F->size() == NewBlockOrder.size() && "Incorrect size of block order");
F->RenumberBlocks();
+ MPDT->updateBlockNumbers();
bool HasChanges = false;
for (size_t I = 0; I < NewBlockOrder.size(); I++) {
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 40bde20..ab45663 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -375,6 +375,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
// numbering, shrink MBBNumbering now.
assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
MBBNumbering.resize(BlockNo);
+ MBBNumberingEpoch++;
}
/// This method iterates over the basic blocks and assigns their IsBeginSection
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 5c68711..416129f 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -3049,9 +3049,10 @@ void SMSchedule::orderDependence(const SwingSchedulerDAG *SSD, SUnit *SU,
MoveUse = Pos;
}
// We did not handle HW dependences in previous for loop,
- // and we normally set Latency = 0 for Anti deps,
- // so may have nodes in same cycle with Anti denpendent on HW regs.
- else if (S.getKind() == SDep::Anti && stageScheduled(*I) == StageInst1) {
+ // and we normally set Latency = 0 for Anti/Output deps,
+ // so may have nodes in same cycle with Anti/Output dependent on HW regs.
+ else if ((S.getKind() == SDep::Anti || S.getKind() == SDep::Output) &&
+ stageScheduled(*I) == StageInst1) {
OrderBeforeUse = true;
if ((MoveUse == 0) || (Pos < MoveUse))
MoveUse = Pos;
@@ -3060,7 +3061,9 @@ void SMSchedule::orderDependence(const SwingSchedulerDAG *SSD, SUnit *SU,
for (auto &P : SU->Preds) {
if (P.getSUnit() != *I)
continue;
- if (P.getKind() == SDep::Order && stageScheduled(*I) == StageInst1) {
+ if ((P.getKind() == SDep::Order || P.getKind() == SDep::Anti ||
+ P.getKind() == SDep::Output) &&
+ stageScheduled(*I) == StageInst1) {
OrderAfterDef = true;
MoveDef = Pos;
}
diff --git a/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 3caa96c..fcedb30 100644
--- a/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -47,7 +47,6 @@ MachineRegisterInfo::MachineRegisterInfo(MachineFunction *MF)
: MF->getSubtarget().enableSubRegLiveness()) {
unsigned NumRegs = getTargetRegisterInfo()->getNumRegs();
VRegInfo.reserve(256);
- RegAllocHints.reserve(256);
UsedPhysRegMask.resize(NumRegs);
PhysRegUseDefLists.reset(new MachineOperand*[NumRegs]());
TheDelegates.clear();
@@ -147,7 +146,6 @@ MachineRegisterInfo::recomputeRegClass(Register Reg) {
Register MachineRegisterInfo::createIncompleteVirtualRegister(StringRef Name) {
Register Reg = Register::index2VirtReg(getNumVirtRegs());
VRegInfo.grow(Reg);
- RegAllocHints.grow(Reg);
insertVRegByName(Name, Reg);
return Reg;
}
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 51dad43..0d3dd65 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -16,6 +16,7 @@
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -46,7 +47,7 @@ static cl::opt<int64_t> MemIntrinsicExpandSizeThresholdOpt(
namespace {
struct PreISelIntrinsicLowering {
- const TargetMachine &TM;
+ const TargetMachine *TM;
const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;
@@ -56,7 +57,7 @@ struct PreISelIntrinsicLowering {
const bool UseMemIntrinsicLibFunc;
explicit PreISelIntrinsicLowering(
- const TargetMachine &TM_,
+ const TargetMachine *TM_,
function_ref<TargetTransformInfo &(Function &)> LookupTTI_,
function_ref<TargetLibraryInfo &(Function &)> LookupTLI_,
bool UseMemIntrinsicLibFunc_ = true)
@@ -222,10 +223,12 @@ bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
return SizeVal > Threshold || Threshold == 0;
}
-static bool canEmitLibcall(const TargetMachine &TM, Function *F,
+static bool canEmitLibcall(const TargetMachine *TM, Function *F,
RTLIB::Libcall LC) {
// TODO: Should this consider the address space of the memcpy?
- const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
+ if (!TM)
+ return true;
+ const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
return TLI->getLibcallName(LC) != nullptr;
}
@@ -351,6 +354,18 @@ bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
return Changed;
});
break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS) \
+ case Intrinsic::VPID:
+#include "llvm/IR/VPIntrinsics.def"
+ forEachCall(F, [&](CallInst *CI) {
+ Function *Parent = CI->getParent()->getParent();
+ const TargetTransformInfo &TTI = LookupTTI(*Parent);
+ auto *VPI = cast<VPIntrinsic>(CI);
+ return expandVectorPredicationIntrinsic(*VPI, TTI);
+ });
+ // Not all intrinsics are removed, but the code is changed in any case.
+ Changed = true;
+ break;
case Intrinsic::objc_autorelease:
Changed |= lowerObjCCall(F, "objc_autorelease");
break;
@@ -453,7 +468,7 @@ public:
return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
};
- const auto &TM = getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+ const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
return Lowering.lowerIntrinsics(M);
}
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index cd5d877..ee03eaa 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -228,6 +228,11 @@ bool PEI::runOnMachineFunction(MachineFunction &MF) {
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
+ // Spill frame pointer and/or base pointer registers if they are clobbered.
+ // It is placed before call frame instruction elimination so it will not mess
+ // with stack arguments.
+ TFI->spillFPBP(MF);
+
// Calculate the MaxCallFrameSize value for the function's frame
// information. Also eliminates call frame pseudo instructions.
calculateCallFrameInfo(MF);
@@ -341,6 +346,9 @@ bool PEI::runOnMachineFunction(MachineFunction &MF) {
<< ore::NV("Function", MF.getFunction().getName()) << "'";
});
+ // Emit any remarks implemented for the target, based on final frame layout.
+ TFI->emitRemarks(MF, ORE);
+
delete RS;
SaveBlocks.clear();
RestoreBlocks.clear();
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 20b3ca2..a5b7397 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -598,6 +598,8 @@ namespace {
const SDLoc &DL);
SDValue foldSubToUSubSat(EVT DstVT, SDNode *N, const SDLoc &DL);
SDValue foldABSToABD(SDNode *N, const SDLoc &DL);
+ SDValue foldSelectToABD(SDValue LHS, SDValue RHS, SDValue True,
+ SDValue False, ISD::CondCode CC, const SDLoc &DL);
SDValue unfoldMaskedMerge(SDNode *N);
SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
@@ -3260,28 +3262,9 @@ static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
if (V.getOpcode() != ISD::XOR)
return SDValue();
- ConstantSDNode *Const = isConstOrConstSplat(V.getOperand(1), false);
- if (!Const)
- return SDValue();
-
- EVT VT = V.getValueType();
-
- bool IsFlip = false;
- switch(TLI.getBooleanContents(VT)) {
- case TargetLowering::ZeroOrOneBooleanContent:
- IsFlip = Const->isOne();
- break;
- case TargetLowering::ZeroOrNegativeOneBooleanContent:
- IsFlip = Const->isAllOnes();
- break;
- case TargetLowering::UndefinedBooleanContent:
- IsFlip = (Const->getAPIntValue() & 0x01) == 1;
- break;
- }
-
- if (IsFlip)
+ if (DAG.isBoolConstant(V.getOperand(1)) == true)
return V.getOperand(0);
- if (Force)
+ if (Force && isConstOrConstSplat(V.getOperand(1), false))
return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
return SDValue();
}
@@ -11585,6 +11568,45 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+// Match SELECTs with absolute difference patterns.
+// (select (setcc a, b, set?gt), (sub a, b), (sub b, a)) --> (abd? a, b)
+// (select (setcc a, b, set?ge), (sub a, b), (sub b, a)) --> (abd? a, b)
+// (select (setcc a, b, set?lt), (sub b, a), (sub a, b)) --> (abd? a, b)
+// (select (setcc a, b, set?le), (sub b, a), (sub a, b)) --> (abd? a, b)
+SDValue DAGCombiner::foldSelectToABD(SDValue LHS, SDValue RHS, SDValue True,
+ SDValue False, ISD::CondCode CC,
+ const SDLoc &DL) {
+ bool IsSigned = isSignedIntSetCC(CC);
+ unsigned ABDOpc = IsSigned ? ISD::ABDS : ISD::ABDU;
+ EVT VT = LHS.getValueType();
+
+ if (!hasOperation(ABDOpc, VT))
+ return SDValue();
+
+ switch (CC) {
+ case ISD::SETGT:
+ case ISD::SETGE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ if (sd_match(True, m_Sub(m_Specific(LHS), m_Specific(RHS))) &&
+ sd_match(False, m_Sub(m_Specific(RHS), m_Specific(LHS))))
+ return DAG.getNode(ABDOpc, DL, VT, LHS, RHS);
+ break;
+ case ISD::SETLT:
+ case ISD::SETLE:
+ case ISD::SETULT:
+ case ISD::SETULE:
+ if (sd_match(True, m_Sub(m_Specific(RHS), m_Specific(LHS))) &&
+ sd_match(False, m_Sub(m_Specific(LHS), m_Specific(RHS))))
+ return DAG.getNode(ABDOpc, DL, VT, LHS, RHS);
+ break;
+ default:
+ break;
+ }
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -12385,37 +12407,8 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
}
}
- // Match VSELECTs with absolute difference patterns.
- // (vselect (setcc a, b, set?gt), (sub a, b), (sub b, a)) --> (abd? a, b)
- // (vselect (setcc a, b, set?ge), (sub a, b), (sub b, a)) --> (abd? a, b)
- // (vselect (setcc a, b, set?lt), (sub b, a), (sub a, b)) --> (abd? a, b)
- // (vselect (setcc a, b, set?le), (sub b, a), (sub a, b)) --> (abd? a, b)
- if (N1.getOpcode() == ISD::SUB && N2.getOpcode() == ISD::SUB &&
- N1.getOperand(0) == N2.getOperand(1) &&
- N1.getOperand(1) == N2.getOperand(0)) {
- bool IsSigned = isSignedIntSetCC(CC);
- unsigned ABDOpc = IsSigned ? ISD::ABDS : ISD::ABDU;
- if (hasOperation(ABDOpc, VT)) {
- switch (CC) {
- case ISD::SETGT:
- case ISD::SETGE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- if (LHS == N1.getOperand(0) && RHS == N1.getOperand(1))
- return DAG.getNode(ABDOpc, DL, VT, LHS, RHS);
- break;
- case ISD::SETLT:
- case ISD::SETLE:
- case ISD::SETULT:
- case ISD::SETULE:
- if (RHS == N1.getOperand(0) && LHS == N1.getOperand(1) )
- return DAG.getNode(ABDOpc, DL, VT, LHS, RHS);
- break;
- default:
- break;
- }
- }
- }
+ if (SDValue ABD = foldSelectToABD(LHS, RHS, N1, N2, CC, DL))
+ return ABD;
// Match VSELECTs into add with unsigned saturation.
if (hasOperation(ISD::UADDSAT, VT)) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 9ca76aa..8405ba9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -236,6 +236,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
// also creates the initial PHI MachineInstrs, though none of the input
// operands are populated.
+ MBBMap.resize(Fn->getMaxBlockNumber());
for (const BasicBlock &BB : *Fn) {
// Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
// are really data, and no instructions can live here.
@@ -261,7 +262,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
}
MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
- MBBMap[&BB] = MBB;
+ MBBMap[BB.getNumber()] = MBB;
MF->push_back(MBB);
// Transfer the address-taken flag. This is necessary because there could
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index a9f2be5..5c6a245 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9924,15 +9924,8 @@ SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
// select true, T, F --> T
// select false, T, F --> F
- if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
- return CondC->isZero() ? F : T;
-
- // TODO: This should simplify VSELECT with non-zero constant condition using
- // something like this (but check boolean contents to be complete?):
- if (ConstantSDNode *CondC = isConstOrConstSplat(Cond, /*AllowUndefs*/ false,
- /*AllowTruncation*/ true))
- if (CondC->isZero())
- return F;
+ if (auto C = isBoolConstant(Cond, /*AllowTruncation=*/true))
+ return *C ? T : F;
// select ?, T, T --> T
if (T == F)
@@ -13141,6 +13134,32 @@ SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
return nullptr;
}
+std::optional<bool> SelectionDAG::isBoolConstant(SDValue N,
+ bool AllowTruncation) const {
+ ConstantSDNode *Const = isConstOrConstSplat(N, false, AllowTruncation);
+ if (!Const)
+ return std::nullopt;
+
+ const APInt &CVal = Const->getAPIntValue();
+ switch (TLI->getBooleanContents(N.getValueType())) {
+ case TargetLowering::ZeroOrOneBooleanContent:
+ if (CVal.isOne())
+ return true;
+ if (CVal.isZero())
+ return false;
+ return std::nullopt;
+ case TargetLowering::ZeroOrNegativeOneBooleanContent:
+ if (CVal.isAllOnes())
+ return true;
+ if (CVal.isZero())
+ return false;
+ return std::nullopt;
+ case TargetLowering::UndefinedBooleanContent:
+ return CVal[0];
+ }
+ llvm_unreachable("Unknown BooleanContent enum");
+}
+
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
assert(!Node->OperandList && "Node already has operands");
assert(SDNode::getMaxNumOperands() >= Vals.size() &&
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9d617c7..1f4436f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4287,6 +4287,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(Op0);
SDLoc dl = getCurSDLoc();
auto &TLI = DAG.getTargetLoweringInfo();
+ GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
// Normalize Vector GEP - all scalar operands should be converted to the
// splat vector.
@@ -4314,7 +4315,8 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// In an inbounds GEP with an offset that is nonnegative even when
// interpreted as signed, assume there is no unsigned overflow.
SDNodeFlags Flags;
- if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
+ if (NW.hasNoUnsignedWrap() ||
+ (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
Flags.setNoUnsignedWrap(true);
N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
@@ -4355,7 +4357,8 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// In an inbounds GEP with an offset that is nonnegative even when
// interpreted as signed, assume there is no unsigned overflow.
SDNodeFlags Flags;
- if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
+ if (NW.hasNoUnsignedWrap() ||
+ (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
Flags.setNoUnsignedWrap(true);
OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 95b6d27..3e517a5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -1643,11 +1643,12 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
// Iterate over all basic blocks in the function.
+ FuncInfo->VisitedBBs.assign(Fn.getMaxBlockNumber(), false);
for (const BasicBlock *LLVMBB : RPOT) {
if (OptLevel != CodeGenOptLevel::None) {
bool AllPredsVisited = true;
for (const BasicBlock *Pred : predecessors(LLVMBB)) {
- if (!FuncInfo->VisitedBBs.count(Pred)) {
+ if (!FuncInfo->VisitedBBs[Pred->getNumber()]) {
AllPredsVisited = false;
break;
}
@@ -1661,7 +1662,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FuncInfo->InvalidatePHILiveOutRegInfo(&PN);
}
- FuncInfo->VisitedBBs.insert(LLVMBB);
+ FuncInfo->VisitedBBs[LLVMBB->getNumber()] = true;
}
BasicBlock::const_iterator const Begin =
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 7fa83a5..e269ca8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10245,10 +10245,13 @@ SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
ArgListTy Args;
ArgListEntry Entry;
- std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
- Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
+ const GlobalValue *GV =
+ cast<GlobalValue>(GA->getGlobal()->stripPointerCastsAndAliases());
+ SmallString<32> NameString("__emutls_v.");
+ NameString += GV->getName();
StringRef EmuTlsVarName(NameString);
- GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
+ const GlobalVariable *EmuTlsVar =
+ GV->getParent()->getNamedGlobal(EmuTlsVarName);
assert(EmuTlsVar && "Cannot find EmuTlsVar ");
Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
Entry.Ty = VoidPtrType;
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index eb74c6f..3a20b34 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -865,11 +865,6 @@ void TargetPassConfig::addIRPasses() {
if (getOptLevel() != CodeGenOptLevel::None && !DisablePartialLibcallInlining)
addPass(createPartiallyInlineLibCallsPass());
- // Expand vector predication intrinsics into standard IR instructions.
- // This pass has to run before ScalarizeMaskedMemIntrin and ExpandReduction
- // passes since it emits those kinds of intrinsics.
- addPass(createExpandVectorPredicationPass());
-
// Instrument function entry after all inlining.
addPass(createPostInlineEntryExitInstrumenterPass());
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index ffc8055..16dab97 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -421,13 +421,16 @@ bool TargetRegisterInfo::getRegAllocationHints(
SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
const MachineRegisterInfo &MRI = MF.getRegInfo();
- const std::pair<unsigned, SmallVector<Register, 4>> &Hints_MRI =
+ const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
MRI.getRegAllocationHints(VirtReg);
+ if (!Hints_MRI)
+ return false;
+
SmallSet<Register, 32> HintedRegs;
// First hint may be a target hint.
- bool Skip = (Hints_MRI.first != 0);
- for (auto Reg : Hints_MRI.second) {
+ bool Skip = (Hints_MRI->first != 0);
+ for (auto Reg : Hints_MRI->second) {
if (Skip) {
Skip = false;
continue;
diff --git a/llvm/lib/CodeGen/UnreachableBlockElim.cpp b/llvm/lib/CodeGen/UnreachableBlockElim.cpp
index 8194f3c..6e3b69b 100644
--- a/llvm/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/llvm/lib/CodeGen/UnreachableBlockElim.cpp
@@ -195,6 +195,8 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
}
F.RenumberBlocks();
+ if (MDT)
+ MDT->updateBlockNumbers();
return (!DeadBlocks.empty() || ModifiedPHI);
}
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
index 44e1789..939a516 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
@@ -1331,9 +1331,9 @@ DWARFDebugLine::LineTable::lookupAddress(object::SectionedAddress Address,
uint32_t
DWARFDebugLine::LineTable::lookupAddressImpl(object::SectionedAddress Address,
bool *IsApproximateLine) const {
- assert(!IsApproximateLine ||
- !*IsApproximateLine && "Make sure IsApproximateLine is appropriately "
- "initialized, if provided");
+ assert((!IsApproximateLine || !*IsApproximateLine) &&
+ "Make sure IsApproximateLine is appropriately "
+ "initialized, if provided");
// First, find an instruction sequence containing the given address.
DWARFDebugLine::Sequence Sequence;
Sequence.SectionIndex = Address.SectionIndex;
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
index 9ce8aec..86e5054 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -29,6 +29,8 @@ using namespace llvm::jitlink;
namespace {
+constexpr StringRef ELFGOTSymbolName = "_GLOBAL_OFFSET_TABLE_";
+
class ELFJITLinker_aarch64 : public JITLinker<ELFJITLinker_aarch64> {
friend class JITLinker<ELFJITLinker_aarch64>;
@@ -36,11 +38,83 @@ public:
ELFJITLinker_aarch64(std::unique_ptr<JITLinkContext> Ctx,
std::unique_ptr<LinkGraph> G,
PassConfiguration PassConfig)
- : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
+ if (shouldAddDefaultTargetPasses(getGraph().getTargetTriple()))
+ getPassConfig().PostAllocationPasses.push_back(
+ [this](LinkGraph &G) { return getOrCreateGOTSymbol(G); });
+ }
private:
+ Symbol *GOTSymbol = nullptr;
+
Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
- return aarch64::applyFixup(G, B, E);
+ return aarch64::applyFixup(G, B, E, GOTSymbol);
+ }
+
+ Error getOrCreateGOTSymbol(LinkGraph &G) {
+ auto DefineExternalGOTSymbolIfPresent =
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ [&](LinkGraph &LG, Symbol &Sym) -> SectionRangeSymbolDesc {
+ if (Sym.getName() == ELFGOTSymbolName)
+ if (auto *GOTSection = G.findSectionByName(
+ aarch64::GOTTableManager::getSectionName())) {
+ GOTSymbol = &Sym;
+ return {*GOTSection, true};
+ }
+ return {};
+ });
+
+ // Try to attach _GLOBAL_OFFSET_TABLE_ to the GOT if it's defined as an
+ // external.
+ if (auto Err = DefineExternalGOTSymbolIfPresent(G))
+ return Err;
+
+ // If we succeeded then we're done.
+ if (GOTSymbol)
+ return Error::success();
+
+ // Otherwise look for a GOT section: If it already has a start symbol we'll
+ // record it, otherwise we'll create our own.
+ // If there's a GOT section but we didn't find an external GOT symbol...
+ if (auto *GOTSection =
+ G.findSectionByName(aarch64::GOTTableManager::getSectionName())) {
+
+ // Check for an existing defined symbol.
+ for (auto *Sym : GOTSection->symbols())
+ if (Sym->getName() == ELFGOTSymbolName) {
+ GOTSymbol = Sym;
+ return Error::success();
+ }
+
+ // If there's no defined symbol then create one.
+ SectionRange SR(*GOTSection);
+ if (SR.empty())
+ GOTSymbol =
+ &G.addAbsoluteSymbol(ELFGOTSymbolName, orc::ExecutorAddr(), 0,
+ Linkage::Strong, Scope::Local, true);
+ else
+ GOTSymbol =
+ &G.addDefinedSymbol(*SR.getFirstBlock(), 0, ELFGOTSymbolName, 0,
+ Linkage::Strong, Scope::Local, false, true);
+ }
+
+ // If we still haven't found a GOT symbol then double check the externals.
+ // We may have a GOT-relative reference but no GOT section, in which case
+ // we just need to point the GOT symbol at some address in this graph.
+ if (!GOTSymbol) {
+ for (auto *Sym : G.external_symbols()) {
+ if (Sym->getName() == ELFGOTSymbolName) {
+ auto Blocks = G.blocks();
+ if (!Blocks.empty()) {
+ G.makeAbsolute(*Sym, (*Blocks.begin())->getAddress());
+ GOTSymbol = Sym;
+ break;
+ }
+ }
+ }
+ }
+
+ return Error::success();
}
};
@@ -70,6 +144,7 @@ private:
ELFPrel64,
ELFAdrGOTPage21,
ELFLd64GOTLo12,
+ ELFLd64GOTPAGELo15,
ELFTLSDescAdrPage21,
ELFTLSDescAddLo12,
ELFTLSDescLd64Lo12,
@@ -125,6 +200,8 @@ private:
return ELFAdrGOTPage21;
case ELF::R_AARCH64_LD64_GOT_LO12_NC:
return ELFLd64GOTLo12;
+ case ELF::R_AARCH64_LD64_GOTPAGE_LO15:
+ return ELFLd64GOTPAGELo15;
case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
return ELFTLSDescAdrPage21;
case ELF::R_AARCH64_TLSDESC_ADD_LO12:
@@ -362,6 +439,10 @@ private:
Kind = aarch64::RequestGOTAndTransformToPageOffset12;
break;
}
+ case ELFLd64GOTPAGELo15: {
+ Kind = aarch64::RequestGOTAndTransformToPageOffset15;
+ break;
+ }
case ELFTLSDescAdrPage21: {
Kind = aarch64::RequestTLSDescEntryAndTransformToPage21;
break;
@@ -427,6 +508,8 @@ private:
return "ELFAdrGOTPage21";
case ELFLd64GOTLo12:
return "ELFLd64GOTLo12";
+ case ELFLd64GOTPAGELo15:
+ return "ELFLd64GOTPAGELo15";
case ELFTLSDescAdrPage21:
return "ELFTLSDescAdrPage21";
case ELFTLSDescAddLo12:
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
index 8733306..125c637 100644
--- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -552,7 +552,7 @@ public:
private:
Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
- return aarch64::applyFixup(G, B, E);
+ return aarch64::applyFixup(G, B, E, nullptr);
}
uint64_t NullValue = 0;
diff --git a/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp b/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp
index cc58255..4d3c195 100644
--- a/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp
@@ -57,10 +57,14 @@ const char *getEdgeKindName(Edge::Kind R) {
return "Page21";
case PageOffset12:
return "PageOffset12";
+ case GotPageOffset15:
+ return "GotPageOffset15";
case RequestGOTAndTransformToPage21:
return "RequestGOTAndTransformToPage21";
case RequestGOTAndTransformToPageOffset12:
return "RequestGOTAndTransformToPageOffset12";
+ case RequestGOTAndTransformToPageOffset15:
+ return "RequestGOTAndTransformToPageOffset15";
case RequestGOTAndTransformToDelta32:
return "RequestGOTAndTransformToDelta32";
case RequestTLVPAndTransformToPage21:
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index afbb9f9..6645acb 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -7211,7 +7211,7 @@ OpenMPIRBuilder::getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
// create different versions of the function for different OMP internal
// variables.
auto Linkage = this->M.getTargetTriple().rfind("wasm32") == 0
- ? GlobalValue::ExternalLinkage
+ ? GlobalValue::InternalLinkage
: GlobalValue::CommonLinkage;
auto *GV = new GlobalVariable(M, Ty, /*IsConstant=*/false, Linkage,
Constant::getNullValue(Ty), Elem.first(),
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 53de9ee..ec71975 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4886,7 +4886,25 @@ bool llvm::UpgradeDebugInfo(Module &M) {
if (DisableAutoUpgradeDebugInfo)
return false;
- unsigned Version = getDebugMetadataVersionFromModule(M);
+ // We need to get metadata before the module is verified (i.e., getModuleFlag
+ // makes assumptions that we haven't verified yet). Carefully extract the flag
+ // from the metadata.
+ unsigned Version = 0;
+ if (NamedMDNode *ModFlags = M.getModuleFlagsMetadata()) {
+ auto OpIt = find_if(ModFlags->operands(), [](const MDNode *Flag) {
+ if (Flag->getNumOperands() < 3)
+ return false;
+ if (MDString *K = dyn_cast_or_null<MDString>(Flag->getOperand(1)))
+ return K->getString() == "Debug Info Version";
+ return false;
+ });
+ if (OpIt != ModFlags->op_end()) {
+ const MDOperand &ValOp = (*OpIt)->getOperand(2);
+ if (auto *CI = mdconst::dyn_extract_or_null<ConstantInt>(ValOp))
+ Version = CI->getZExtValue();
+ }
+ }
+
if (Version == DEBUG_METADATA_VERSION) {
bool BrokenDebugInfo = false;
if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index 50b211a..c389d72 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -1617,21 +1617,107 @@ ConstantRange::shl(const ConstantRange &Other) const {
return ConstantRange::getNonEmpty(std::move(Min), std::move(Max) + 1);
}
+static ConstantRange computeShlNUW(const ConstantRange &LHS,
+ const ConstantRange &RHS) {
+ unsigned BitWidth = LHS.getBitWidth();
+ bool Overflow;
+ APInt LHSMin = LHS.getUnsignedMin();
+ unsigned RHSMin = RHS.getUnsignedMin().getLimitedValue(BitWidth);
+ APInt MinShl = LHSMin.ushl_ov(RHSMin, Overflow);
+ if (Overflow)
+ return ConstantRange::getEmpty(BitWidth);
+ APInt LHSMax = LHS.getUnsignedMax();
+ unsigned RHSMax = RHS.getUnsignedMax().getLimitedValue(BitWidth);
+ APInt MaxShl = MinShl;
+ unsigned MaxShAmt = LHSMax.countLeadingZeros();
+ if (RHSMin <= MaxShAmt)
+ MaxShl = LHSMax << std::min(RHSMax, MaxShAmt);
+ RHSMin = std::max(RHSMin, MaxShAmt + 1);
+ RHSMax = std::min(RHSMax, LHSMin.countLeadingZeros());
+ if (RHSMin <= RHSMax)
+ MaxShl = APIntOps::umax(MaxShl,
+ APInt::getHighBitsSet(BitWidth, BitWidth - RHSMin));
+ return ConstantRange::getNonEmpty(MinShl, MaxShl + 1);
+}
+
+static ConstantRange computeShlNSWWithNNegLHS(const APInt &LHSMin,
+ const APInt &LHSMax,
+ unsigned RHSMin,
+ unsigned RHSMax) {
+ unsigned BitWidth = LHSMin.getBitWidth();
+ bool Overflow;
+ APInt MinShl = LHSMin.sshl_ov(RHSMin, Overflow);
+ if (Overflow)
+ return ConstantRange::getEmpty(BitWidth);
+ APInt MaxShl = MinShl;
+ unsigned MaxShAmt = LHSMax.countLeadingZeros() - 1;
+ if (RHSMin <= MaxShAmt)
+ MaxShl = LHSMax << std::min(RHSMax, MaxShAmt);
+ RHSMin = std::max(RHSMin, MaxShAmt + 1);
+ RHSMax = std::min(RHSMax, LHSMin.countLeadingZeros() - 1);
+ if (RHSMin <= RHSMax)
+ MaxShl = APIntOps::umax(MaxShl,
+ APInt::getBitsSet(BitWidth, RHSMin, BitWidth - 1));
+ return ConstantRange::getNonEmpty(MinShl, MaxShl + 1);
+}
+
+static ConstantRange computeShlNSWWithNegLHS(const APInt &LHSMin,
+ const APInt &LHSMax,
+ unsigned RHSMin, unsigned RHSMax) {
+ unsigned BitWidth = LHSMin.getBitWidth();
+ bool Overflow;
+ APInt MaxShl = LHSMax.sshl_ov(RHSMin, Overflow);
+ if (Overflow)
+ return ConstantRange::getEmpty(BitWidth);
+ APInt MinShl = MaxShl;
+ unsigned MaxShAmt = LHSMin.countLeadingOnes() - 1;
+ if (RHSMin <= MaxShAmt)
+ MinShl = LHSMin.shl(std::min(RHSMax, MaxShAmt));
+ RHSMin = std::max(RHSMin, MaxShAmt + 1);
+ RHSMax = std::min(RHSMax, LHSMax.countLeadingOnes() - 1);
+ if (RHSMin <= RHSMax)
+ MinShl = APInt::getSignMask(BitWidth);
+ return ConstantRange::getNonEmpty(MinShl, MaxShl + 1);
+}
+
+static ConstantRange computeShlNSW(const ConstantRange &LHS,
+ const ConstantRange &RHS) {
+ unsigned BitWidth = LHS.getBitWidth();
+ unsigned RHSMin = RHS.getUnsignedMin().getLimitedValue(BitWidth);
+ unsigned RHSMax = RHS.getUnsignedMax().getLimitedValue(BitWidth);
+ APInt LHSMin = LHS.getSignedMin();
+ APInt LHSMax = LHS.getSignedMax();
+ if (LHSMin.isNonNegative())
+ return computeShlNSWWithNNegLHS(LHSMin, LHSMax, RHSMin, RHSMax);
+ else if (LHSMax.isNegative())
+ return computeShlNSWWithNegLHS(LHSMin, LHSMax, RHSMin, RHSMax);
+ return computeShlNSWWithNNegLHS(APInt::getZero(BitWidth), LHSMax, RHSMin,
+ RHSMax)
+ .unionWith(computeShlNSWWithNegLHS(LHSMin, APInt::getAllOnes(BitWidth),
+ RHSMin, RHSMax),
+ ConstantRange::Signed);
+}
+
ConstantRange ConstantRange::shlWithNoWrap(const ConstantRange &Other,
unsigned NoWrapKind,
PreferredRangeType RangeType) const {
if (isEmptySet() || Other.isEmptySet())
return getEmpty();
- ConstantRange Result = shl(Other);
-
- if (NoWrapKind & OverflowingBinaryOperator::NoSignedWrap)
- Result = Result.intersectWith(sshl_sat(Other), RangeType);
-
- if (NoWrapKind & OverflowingBinaryOperator::NoUnsignedWrap)
- Result = Result.intersectWith(ushl_sat(Other), RangeType);
-
- return Result;
+ switch (NoWrapKind) {
+ case 0:
+ return shl(Other);
+ case OverflowingBinaryOperator::NoSignedWrap:
+ return computeShlNSW(*this, Other);
+ case OverflowingBinaryOperator::NoUnsignedWrap:
+ return computeShlNUW(*this, Other);
+ case OverflowingBinaryOperator::NoSignedWrap |
+ OverflowingBinaryOperator::NoUnsignedWrap:
+ return computeShlNSW(*this, Other)
+ .intersectWith(computeShlNUW(*this, Other), RangeType);
+ default:
+ llvm_unreachable("Invalid NoWrapKind");
+ }
}
ConstantRange
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index 70803c1..a1c9e92 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -2098,6 +2098,18 @@ Value *ConstantPtrAuth::handleOperandChangeImpl(Value *From, Value *ToV) {
Values, this, From, To, NumUpdated, OperandNo);
}
+bool ConstantPtrAuth::hasSpecialAddressDiscriminator(uint64_t Value) const {
+ const auto *CastV = dyn_cast<ConstantExpr>(getAddrDiscriminator());
+ if (!CastV || CastV->getOpcode() != Instruction::IntToPtr)
+ return false;
+
+ const auto *IntVal = dyn_cast<ConstantInt>(CastV->getOperand(0));
+ if (!IntVal)
+ return false;
+
+ return IntVal->getValue() == Value;
+}
+
bool ConstantPtrAuth::isKnownCompatibleWith(const Value *Key,
const Value *Discriminator,
const DataLayout &DL) const {
diff --git a/llvm/lib/IR/DebugProgramInstruction.cpp b/llvm/lib/IR/DebugProgramInstruction.cpp
index 362d467..7937e30 100644
--- a/llvm/lib/IR/DebugProgramInstruction.cpp
+++ b/llvm/lib/IR/DebugProgramInstruction.cpp
@@ -491,7 +491,7 @@ void DbgVariableRecord::setAssignId(DIAssignID *New) {
void DbgVariableRecord::setKillAddress() {
resetDebugValue(
- 1, ValueAsMetadata::get(UndefValue::get(getAddress()->getType())));
+ 1, ValueAsMetadata::get(PoisonValue::get(getAddress()->getType())));
}
bool DbgVariableRecord::isKillAddress() const {
diff --git a/llvm/lib/IR/LegacyPassManager.cpp b/llvm/lib/IR/LegacyPassManager.cpp
index f2a83b5..96e2f1d 100644
--- a/llvm/lib/IR/LegacyPassManager.cpp
+++ b/llvm/lib/IR/LegacyPassManager.cpp
@@ -12,7 +12,6 @@
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LLVMContext.h"
@@ -1384,8 +1383,7 @@ bool FPPassManager::runOnFunction(Function &F) {
// Store name outside of loop to avoid redundant calls.
const StringRef Name = F.getName();
- llvm::TimeTraceScope FunctionScope(
- "OptFunction", [&F]() { return demangle(F.getName().str()); });
+ llvm::TimeTraceScope FunctionScope("OptFunction", Name);
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
FunctionPass *FP = getContainedPass(Index);
diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp
index c966c53..80b5408 100644
--- a/llvm/lib/IR/Module.cpp
+++ b/llvm/lib/IR/Module.cpp
@@ -259,10 +259,8 @@ GlobalIFunc *Module::getNamedIFunc(StringRef Name) const {
/// getNamedMetadata - Return the first NamedMDNode in the module with the
/// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
-NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
- SmallString<256> NameData;
- StringRef NameRef = Name.toStringRef(NameData);
- return NamedMDSymTab.lookup(NameRef);
+NamedMDNode *Module::getNamedMetadata(StringRef Name) const {
+ return NamedMDSymTab.lookup(Name);
}
/// getOrInsertNamedMetadata - Return the first named MDNode in the module
@@ -296,20 +294,6 @@ bool Module::isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB) {
return false;
}
-bool Module::isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
- MDString *&Key, Metadata *&Val) {
- if (ModFlag.getNumOperands() < 3)
- return false;
- if (!isValidModFlagBehavior(ModFlag.getOperand(0), MFB))
- return false;
- MDString *K = dyn_cast_or_null<MDString>(ModFlag.getOperand(1));
- if (!K)
- return false;
- Key = K;
- Val = ModFlag.getOperand(2);
- return true;
-}
-
/// getModuleFlagsMetadata - Returns the module flags in the provided vector.
void Module::
getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
@@ -317,25 +301,24 @@ getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
if (!ModFlags) return;
for (const MDNode *Flag : ModFlags->operands()) {
- ModFlagBehavior MFB;
- MDString *Key = nullptr;
- Metadata *Val = nullptr;
- if (isValidModuleFlag(*Flag, MFB, Key, Val)) {
- // Check the operands of the MDNode before accessing the operands.
- // The verifier will actually catch these failures.
- Flags.push_back(ModuleFlagEntry(MFB, Key, Val));
- }
+ // The verifier will catch errors, so no need to check them here.
+ auto *MFBConstant = mdconst::extract<ConstantInt>(Flag->getOperand(0));
+ auto MFB = static_cast<ModFlagBehavior>(MFBConstant->getLimitedValue());
+ MDString *Key = cast<MDString>(Flag->getOperand(1));
+ Metadata *Val = Flag->getOperand(2);
+ Flags.push_back(ModuleFlagEntry(MFB, Key, Val));
}
}
/// Return the corresponding value if Key appears in module flags, otherwise
/// return null.
Metadata *Module::getModuleFlag(StringRef Key) const {
- SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
- getModuleFlagsMetadata(ModuleFlags);
- for (const ModuleFlagEntry &MFE : ModuleFlags) {
- if (Key == MFE.Key->getString())
- return MFE.Val;
+ const NamedMDNode *ModFlags = getModuleFlagsMetadata();
+ if (!ModFlags)
+ return nullptr;
+ for (const MDNode *Flag : ModFlags->operands()) {
+ if (Key == cast<MDString>(Flag->getOperand(1))->getString())
+ return Flag->getOperand(2);
}
return nullptr;
}
@@ -388,10 +371,7 @@ void Module::setModuleFlag(ModFlagBehavior Behavior, StringRef Key,
NamedMDNode *ModFlags = getOrInsertModuleFlagsMetadata();
// Replace the flag if it already exists.
for (MDNode *Flag : ModFlags->operands()) {
- ModFlagBehavior MFB;
- MDString *K = nullptr;
- Metadata *V = nullptr;
- if (isValidModuleFlag(*Flag, MFB, K, V) && K->getString() == Key) {
+ if (cast<MDString>(Flag->getOperand(1))->getString() == Key) {
Flag->replaceOperandWith(2, Val);
return;
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index cb4eaf8..4bcd799 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6329,6 +6329,14 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
"llvm.threadlocal.address operand isThreadLocal() must be true");
break;
}
+ case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cta:
+ case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_cluster:
+ case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_gpu:
+ case Intrinsic::nvvm_fence_proxy_tensormap_generic_acquire_sys: {
+ unsigned size = cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue();
+ Check(size == 128, " The only supported value for size operand is 128");
+ break;
+ }
};
// Verify that there aren't any unmediated control transfers between funclets.
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index f958905..c40a074 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -1018,8 +1018,6 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
if (RelSection)
Members.second.push_back(RelSection->getOrdinal());
}
-
- OWriter.TargetObjectWriter->addTargetSectionFlags(Ctx, Section);
}
for (auto &[Group, Members] : Groups) {
diff --git a/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/llvm/lib/MC/MCELFObjectTargetWriter.cpp
index c35e1f26..49cca57 100644
--- a/llvm/lib/MC/MCELFObjectTargetWriter.cpp
+++ b/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -27,6 +27,3 @@ void
MCELFObjectTargetWriter::sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs) {
}
-
-void MCELFObjectTargetWriter::addTargetSectionFlags(MCContext &Ctx,
- MCSectionELF &Sec) {}
diff --git a/llvm/lib/MCA/Stages/InOrderIssueStage.cpp b/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
index 8f720db..30def19 100644
--- a/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
+++ b/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
@@ -45,7 +45,7 @@ void StallInfo::cycleEnd() {
InOrderIssueStage::InOrderIssueStage(const MCSubtargetInfo &STI,
RegisterFile &PRF, CustomBehaviour &CB,
- LSUnit &LSU)
+ LSUnitBase &LSU)
: STI(STI), PRF(PRF), RM(STI.getSchedModel()), CB(CB), LSU(LSU),
NumIssued(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
diff --git a/llvm/lib/Object/COFFObjectFile.cpp b/llvm/lib/Object/COFFObjectFile.cpp
index 5a85b8e0..64b5eaf 100644
--- a/llvm/lib/Object/COFFObjectFile.cpp
+++ b/llvm/lib/Object/COFFObjectFile.cpp
@@ -798,6 +798,60 @@ Error COFFObjectFile::initLoadConfigPtr() {
return E;
}
}
+
+ if (Config->Size >=
+ offsetof(coff_load_configuration64, DynamicValueRelocTableSection) +
+ sizeof(Config->DynamicValueRelocTableSection))
+ if (Error E = initDynamicRelocPtr(Config->DynamicValueRelocTableSection,
+ Config->DynamicValueRelocTableOffset))
+ return E;
+ } else {
+ auto Config = getLoadConfig32();
+ if (Config->Size >=
+ offsetof(coff_load_configuration32, DynamicValueRelocTableSection) +
+ sizeof(Config->DynamicValueRelocTableSection)) {
+ if (Error E = initDynamicRelocPtr(Config->DynamicValueRelocTableSection,
+ Config->DynamicValueRelocTableOffset))
+ return E;
+ }
+ }
+ return Error::success();
+}
+
+Error COFFObjectFile::initDynamicRelocPtr(uint32_t SectionIndex,
+ uint32_t SectionOffset) {
+ Expected<const coff_section *> Section = getSection(SectionIndex);
+ if (!Section)
+ return Section.takeError();
+ if (!*Section)
+ return Error::success();
+
+ // Interpret and validate dynamic relocations.
+ ArrayRef<uint8_t> Contents;
+ if (Error E = getSectionContents(*Section, Contents))
+ return E;
+
+ Contents = Contents.drop_front(SectionOffset);
+ if (Contents.size() < sizeof(coff_dynamic_reloc_table))
+ return createStringError(object_error::parse_failed,
+ "Too large DynamicValueRelocTableOffset (" +
+ Twine(SectionOffset) + ")");
+
+ DynamicRelocTable =
+ reinterpret_cast<const coff_dynamic_reloc_table *>(Contents.data());
+
+ if (DynamicRelocTable->Version != 1 && DynamicRelocTable->Version != 2)
+ return createStringError(object_error::parse_failed,
+ "Unsupported dynamic relocations table version (" +
+ Twine(DynamicRelocTable->Version) + ")");
+ if (DynamicRelocTable->Size > Contents.size() - sizeof(*DynamicRelocTable))
+ return createStringError(object_error::parse_failed,
+                             "Invalid dynamic relocations directory size (" +
+ Twine(DynamicRelocTable->Size) + ")");
+
+ for (auto DynReloc : dynamic_relocs()) {
+ if (Error e = DynReloc.validate())
+ return e;
}
return Error::success();
@@ -1047,6 +1101,19 @@ base_reloc_iterator COFFObjectFile::base_reloc_end() const {
return base_reloc_iterator(BaseRelocRef(BaseRelocEnd, this));
}
+dynamic_reloc_iterator COFFObjectFile::dynamic_reloc_begin() const {
+ const void *Header = DynamicRelocTable ? DynamicRelocTable + 1 : nullptr;
+ return dynamic_reloc_iterator(DynamicRelocRef(Header, this));
+}
+
+dynamic_reloc_iterator COFFObjectFile::dynamic_reloc_end() const {
+ const void *Header = nullptr;
+ if (DynamicRelocTable)
+ Header = reinterpret_cast<const uint8_t *>(DynamicRelocTable + 1) +
+ DynamicRelocTable->Size;
+ return dynamic_reloc_iterator(DynamicRelocRef(Header, this));
+}
+
uint8_t COFFObjectFile::getBytesInAddress() const {
return getArch() == Triple::x86_64 || getArch() == Triple::aarch64 ? 8 : 4;
}
@@ -1100,6 +1167,10 @@ iterator_range<base_reloc_iterator> COFFObjectFile::base_relocs() const {
return make_range(base_reloc_begin(), base_reloc_end());
}
+iterator_range<dynamic_reloc_iterator> COFFObjectFile::dynamic_relocs() const {
+ return make_range(dynamic_reloc_begin(), dynamic_reloc_end());
+}
+
const data_directory *COFFObjectFile::getDataDirectory(uint32_t Index) const {
if (!DataDirectory)
return nullptr;
@@ -1789,6 +1860,275 @@ Error BaseRelocRef::getRVA(uint32_t &Result) const {
return Error::success();
}
+bool DynamicRelocRef::operator==(const DynamicRelocRef &Other) const {
+ return Header == Other.Header;
+}
+
+void DynamicRelocRef::moveNext() {
+ switch (Obj->getDynamicRelocTable()->Version) {
+ case 1:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64 *>(Header);
+ Header += sizeof(*H) + H->BaseRelocSize;
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32 *>(Header);
+ Header += sizeof(*H) + H->BaseRelocSize;
+ }
+ break;
+ case 2:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64_v2 *>(Header);
+ Header += H->HeaderSize + H->FixupInfoSize;
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32_v2 *>(Header);
+ Header += H->HeaderSize + H->FixupInfoSize;
+ }
+ break;
+ }
+}
+
+uint32_t DynamicRelocRef::getType() const {
+ switch (Obj->getDynamicRelocTable()->Version) {
+ case 1:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64 *>(Header);
+ return H->Symbol;
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32 *>(Header);
+ return H->Symbol;
+ }
+ break;
+ case 2:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64_v2 *>(Header);
+ return H->Symbol;
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32_v2 *>(Header);
+ return H->Symbol;
+ }
+ break;
+ default:
+ llvm_unreachable("invalid version");
+ }
+}
+
+void DynamicRelocRef::getContents(ArrayRef<uint8_t> &Ref) const {
+ switch (Obj->getDynamicRelocTable()->Version) {
+ case 1:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64 *>(Header);
+ Ref = ArrayRef(Header + sizeof(*H), H->BaseRelocSize);
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32 *>(Header);
+ Ref = ArrayRef(Header + sizeof(*H), H->BaseRelocSize);
+ }
+ break;
+ case 2:
+ if (Obj->is64()) {
+ auto H = reinterpret_cast<const coff_dynamic_relocation64_v2 *>(Header);
+ Ref = ArrayRef(Header + H->HeaderSize, H->FixupInfoSize);
+ } else {
+ auto H = reinterpret_cast<const coff_dynamic_relocation32_v2 *>(Header);
+ Ref = ArrayRef(Header + H->HeaderSize, H->FixupInfoSize);
+ }
+ break;
+ }
+}
+
+Error DynamicRelocRef::validate() const {
+ const coff_dynamic_reloc_table *Table = Obj->getDynamicRelocTable();
+ size_t ContentsSize =
+ reinterpret_cast<const uint8_t *>(Table + 1) + Table->Size - Header;
+ size_t HeaderSize;
+ if (Table->Version == 1)
+ HeaderSize = Obj->is64() ? sizeof(coff_dynamic_relocation64)
+ : sizeof(coff_dynamic_relocation32);
+ else
+ HeaderSize = Obj->is64() ? sizeof(coff_dynamic_relocation64_v2)
+ : sizeof(coff_dynamic_relocation32_v2);
+ if (HeaderSize > ContentsSize)
+ return createStringError(object_error::parse_failed,
+ "Unexpected end of dynamic relocations data");
+
+ if (Table->Version == 2) {
+ size_t Size =
+ Obj->is64()
+ ? reinterpret_cast<const coff_dynamic_relocation64_v2 *>(Header)
+ ->HeaderSize
+ : reinterpret_cast<const coff_dynamic_relocation32_v2 *>(Header)
+ ->HeaderSize;
+ if (Size < HeaderSize || Size > ContentsSize)
+ return createStringError(object_error::parse_failed,
+ "Invalid dynamic relocation header size (" +
+ Twine(Size) + ")");
+ HeaderSize = Size;
+ }
+
+ ArrayRef<uint8_t> Contents;
+ getContents(Contents);
+ if (Contents.size() > ContentsSize - HeaderSize)
+ return createStringError(object_error::parse_failed,
+ "Too large dynamic relocation size (" +
+ Twine(Contents.size()) + ")");
+
+ switch (getType()) {
+ case COFF::IMAGE_DYNAMIC_RELOCATION_ARM64X:
+ for (auto Reloc : arm64x_relocs()) {
+ if (Error E = Reloc.validate(Obj))
+ return E;
+ }
+ break;
+ }
+
+ return Error::success();
+}
+
+arm64x_reloc_iterator DynamicRelocRef::arm64x_reloc_begin() const {
+ assert(getType() == COFF::IMAGE_DYNAMIC_RELOCATION_ARM64X);
+ ArrayRef<uint8_t> Content;
+ getContents(Content);
+ auto Header =
+ reinterpret_cast<const coff_base_reloc_block_header *>(Content.begin());
+ return arm64x_reloc_iterator(Arm64XRelocRef(Header));
+}
+
+arm64x_reloc_iterator DynamicRelocRef::arm64x_reloc_end() const {
+ assert(getType() == COFF::IMAGE_DYNAMIC_RELOCATION_ARM64X);
+ ArrayRef<uint8_t> Content;
+ getContents(Content);
+ auto Header =
+ reinterpret_cast<const coff_base_reloc_block_header *>(Content.end());
+ return arm64x_reloc_iterator(Arm64XRelocRef(Header, 0));
+}
+
+iterator_range<arm64x_reloc_iterator> DynamicRelocRef::arm64x_relocs() const {
+ return make_range(arm64x_reloc_begin(), arm64x_reloc_end());
+}
+
+bool Arm64XRelocRef::operator==(const Arm64XRelocRef &Other) const {
+ return Header == Other.Header && Index == Other.Index;
+}
+
+uint8_t Arm64XRelocRef::getEntrySize() const {
+ switch (getType()) {
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE:
+ return (1ull << getArg()) / sizeof(uint16_t) + 1;
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+void Arm64XRelocRef::moveNext() {
+ Index += getEntrySize();
+ if (sizeof(*Header) + Index * sizeof(uint16_t) < Header->BlockSize &&
+ !getReloc())
+ ++Index; // Skip padding
+ if (sizeof(*Header) + Index * sizeof(uint16_t) == Header->BlockSize) {
+ // The end of the block, move to the next one.
+ Header =
+ reinterpret_cast<const coff_base_reloc_block_header *>(&getReloc());
+ Index = 0;
+ }
+}
+
+uint8_t Arm64XRelocRef::getSize() const {
+ switch (getType()) {
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_ZEROFILL:
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE:
+ return 1 << getArg();
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA:
+ return sizeof(uint32_t);
+ }
+ llvm_unreachable("Unknown Arm64XFixupType enum");
+}
+
+uint64_t Arm64XRelocRef::getValue() const {
+ auto Ptr = reinterpret_cast<const ulittle16_t *>(Header + 1) + Index + 1;
+
+ switch (getType()) {
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE: {
+ ulittle64_t Value(0);
+ memcpy(&Value, Ptr, getSize());
+ return Value;
+ }
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA: {
+ uint16_t arg = getArg();
+ int delta = *Ptr;
+
+ if (arg & 1)
+ delta = -delta;
+ delta *= (arg & 2) ? 8 : 4;
+ return delta;
+ }
+ default:
+ return 0;
+ }
+}
+
+Error Arm64XRelocRef::validate(const COFFObjectFile *Obj) const {
+ if (!Index) {
+ const coff_dynamic_reloc_table *Table = Obj->getDynamicRelocTable();
+ size_t ContentsSize = reinterpret_cast<const uint8_t *>(Table + 1) +
+ Table->Size -
+ reinterpret_cast<const uint8_t *>(Header);
+ if (ContentsSize < sizeof(coff_base_reloc_block_header))
+ return createStringError(object_error::parse_failed,
+ "Unexpected end of ARM64X relocations data");
+ if (Header->BlockSize <= sizeof(*Header))
+ return createStringError(object_error::parse_failed,
+ "ARM64X relocations block size (" +
+ Twine(Header->BlockSize) + ") is too small");
+ if (Header->BlockSize % sizeof(uint32_t))
+ return createStringError(object_error::parse_failed,
+ "Unaligned ARM64X relocations block size (" +
+ Twine(Header->BlockSize) + ")");
+ if (Header->BlockSize > ContentsSize)
+ return createStringError(object_error::parse_failed,
+ "ARM64X relocations block size (" +
+ Twine(Header->BlockSize) + ") is too large");
+ if (Header->PageRVA & 0xfff)
+ return createStringError(object_error::parse_failed,
+ "Unaligned ARM64X relocations page RVA (" +
+ Twine(Header->PageRVA) + ")");
+ }
+
+ switch ((getReloc() >> 12) & 3) {
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_ZEROFILL:
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA:
+ break;
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE:
+ if (!getArg())
+ return createStringError(object_error::parse_failed,
+ "Invalid ARM64X relocation value size (0)");
+ break;
+ default:
+ return createStringError(object_error::parse_failed,
+ "Invalid relocation type");
+ }
+
+ uint32_t RelocsSize =
+ (Header->BlockSize - sizeof(*Header)) / sizeof(uint16_t);
+ uint16_t EntrySize = getEntrySize();
+ if (!getReloc() ||
+ (Index + EntrySize + 1 < RelocsSize && !getReloc(EntrySize)))
+ return createStringError(object_error::parse_failed,
+ "Unexpected ARM64X relocations terminator");
+ if (Index + EntrySize > RelocsSize)
+ return createStringError(object_error::parse_failed,
+ "Unexpected end of ARM64X relocations");
+ if (getRVA() % getSize())
+ return createStringError(object_error::parse_failed,
+ "Unaligned ARM64X relocation RVA (" +
+ Twine(getRVA()) + ")");
+ if (Header->PageRVA) {
+ uintptr_t IntPtr;
+ return Obj->getRvaPtr(getRVA() + getSize(), IntPtr, "ARM64X reloc");
+ }
+ return Error::success();
+}
+
#define RETURN_IF_ERROR(Expr) \
do { \
Error E = (Expr); \
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 53c3de0..f79c233 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -441,6 +441,8 @@ std::optional<StringRef> ELFObjectFileBase::tryGetCPUName() const {
case ELF::EM_PPC:
case ELF::EM_PPC64:
return StringRef("future");
+ case ELF::EM_BPF:
+ return StringRef("v4");
default:
return std::nullopt;
}
diff --git a/llvm/lib/Passes/CMakeLists.txt b/llvm/lib/Passes/CMakeLists.txt
index b522432..6425f49 100644
--- a/llvm/lib/Passes/CMakeLists.txt
+++ b/llvm/lib/Passes/CMakeLists.txt
@@ -21,7 +21,6 @@ add_llvm_component_library(LLVMPasses
CodeGen
Core
Coroutines
- Demangle
HipStdPar
IPO
InstCombine
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 5dbb1e2..bcc69d5 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -28,6 +28,7 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallPrinter.h"
#include "llvm/Analysis/CostModel.h"
+#include "llvm/Analysis/CtxProfAnalysis.h"
#include "llvm/Analysis/CycleAnalysis.h"
#include "llvm/Analysis/DDG.h"
#include "llvm/Analysis/DDGPrinter.h"
@@ -330,6 +331,8 @@ cl::opt<bool> PrintPipelinePasses(
"(best-effort only)."));
} // namespace llvm
+extern cl::opt<std::string> UseCtxProfile;
+
AnalysisKey NoOpModuleAnalysis::Key;
AnalysisKey NoOpCGSCCAnalysis::Key;
AnalysisKey NoOpFunctionAnalysis::Key;
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index adebbb5..c175ee8 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -304,7 +304,7 @@ static cl::opt<bool> UseLoopVersioningLICM(
"enable-loop-versioning-licm", cl::init(false), cl::Hidden,
cl::desc("Enable the experimental Loop Versioning LICM pass"));
-static cl::opt<std::string>
+cl::opt<std::string>
UseCtxProfile("use-ctx-profile", cl::init(""), cl::Hidden,
cl::desc("Use the specified contextual profile file"));
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 3b92823..61a5bab 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -20,6 +20,7 @@
#endif
MODULE_ANALYSIS("callgraph", CallGraphAnalysis())
MODULE_ANALYSIS("collector-metadata", CollectorMetadataAnalysis())
+MODULE_ANALYSIS("ctx-prof-analysis", CtxProfAnalysis(UseCtxProfile))
MODULE_ANALYSIS("inline-advisor", InlineAdvisorAnalysis())
MODULE_ANALYSIS("ir-similarity", IRSimilarityAnalysis())
MODULE_ANALYSIS("lcg", LazyCallGraphAnalysis())
@@ -79,6 +80,7 @@ MODULE_PASS("insert-gcov-profiling", GCOVProfilerPass())
MODULE_PASS("instrorderfile", InstrOrderFilePass())
MODULE_PASS("instrprof", InstrProfilingLoweringPass())
MODULE_PASS("ctx-instr-lower", PGOCtxProfLoweringPass())
+MODULE_PASS("print<ctx-prof-analysis>", CtxProfAnalysisPrinterPass(dbgs()))
MODULE_PASS("invalidate<all>", InvalidateAllAnalysesPass())
MODULE_PASS("iroutliner", IROutlinerPass())
MODULE_PASS("jmc-instrumenter", JMCInstrumenterPass())
@@ -104,7 +106,7 @@ MODULE_PASS("pgo-icall-prom", PGOIndirectCallPromotion())
MODULE_PASS("pgo-instr-gen", PGOInstrumentationGen())
MODULE_PASS("pgo-instr-use", PGOInstrumentationUse())
MODULE_PASS("poison-checking", PoisonCheckingPass())
-MODULE_PASS("pre-isel-intrinsic-lowering", PreISelIntrinsicLoweringPass(*TM))
+MODULE_PASS("pre-isel-intrinsic-lowering", PreISelIntrinsicLoweringPass(TM))
MODULE_PASS("print", PrintModulePass(dbgs()))
MODULE_PASS("print-callgraph", CallGraphPrinterPass(dbgs()))
MODULE_PASS("print-callgraph-sccs", CallGraphSCCsPrinterPass(dbgs()))
diff --git a/llvm/lib/Passes/StandardInstrumentations.cpp b/llvm/lib/Passes/StandardInstrumentations.cpp
index 8f2461f..036484c 100644
--- a/llvm/lib/Passes/StandardInstrumentations.cpp
+++ b/llvm/lib/Passes/StandardInstrumentations.cpp
@@ -22,7 +22,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineVerifier.h"
-#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
@@ -236,12 +235,12 @@ void printIR(raw_ostream &OS, const MachineFunction *MF) {
MF->print(OS);
}
-std::string getIRName(Any IR, bool demangled = false) {
+std::string getIRName(Any IR) {
if (unwrapIR<Module>(IR))
return "[module]";
if (const auto *F = unwrapIR<Function>(IR))
- return demangled ? demangle(F->getName()) : F->getName().str();
+ return F->getName().str();
if (const auto *C = unwrapIR<LazyCallGraph::SCC>(IR))
return C->getName();
@@ -251,7 +250,7 @@ std::string getIRName(Any IR, bool demangled = false) {
L->getHeader()->getParent()->getName().str();
if (const auto *MF = unwrapIR<MachineFunction>(IR))
- return demangled ? demangle(MF->getName()) : MF->getName().str();
+ return MF->getName().str();
llvm_unreachable("Unknown wrapped IR type");
}
@@ -1588,7 +1587,7 @@ void TimeProfilingPassesHandler::registerCallbacks(
}
void TimeProfilingPassesHandler::runBeforePass(StringRef PassID, Any IR) {
- timeTraceProfilerBegin(PassID, getIRName(IR, true));
+ timeTraceProfilerBegin(PassID, getIRName(IR));
}
void TimeProfilingPassesHandler::runAfterPass() { timeTraceProfilerEnd(); }
diff --git a/llvm/lib/ProfileData/PGOCtxProfReader.cpp b/llvm/lib/ProfileData/PGOCtxProfReader.cpp
index 0a0e7db..8354e30 100644
--- a/llvm/lib/ProfileData/PGOCtxProfReader.cpp
+++ b/llvm/lib/ProfileData/PGOCtxProfReader.cpp
@@ -33,18 +33,18 @@ using namespace llvm;
if (auto Err = (EXPR)) \
return Err;
-Expected<PGOContextualProfile &>
-PGOContextualProfile::getOrEmplace(uint32_t Index, GlobalValue::GUID G,
- SmallVectorImpl<uint64_t> &&Counters) {
- auto [Iter, Inserted] = Callsites[Index].insert(
- {G, PGOContextualProfile(G, std::move(Counters))});
+Expected<PGOCtxProfContext &>
+PGOCtxProfContext::getOrEmplace(uint32_t Index, GlobalValue::GUID G,
+ SmallVectorImpl<uint64_t> &&Counters) {
+ auto [Iter, Inserted] =
+ Callsites[Index].insert({G, PGOCtxProfContext(G, std::move(Counters))});
if (!Inserted)
return make_error<InstrProfError>(instrprof_error::invalid_prof,
"Duplicate GUID for same callsite.");
return Iter->second;
}
-void PGOContextualProfile::getContainedGuids(
+void PGOCtxProfContext::getContainedGuids(
DenseSet<GlobalValue::GUID> &Guids) const {
Guids.insert(GUID);
for (const auto &[_, Callsite] : Callsites)
@@ -74,7 +74,7 @@ bool PGOCtxProfileReader::canReadContext() {
Blk->ID == PGOCtxProfileBlockIDs::ContextNodeBlockID;
}
-Expected<std::pair<std::optional<uint32_t>, PGOContextualProfile>>
+Expected<std::pair<std::optional<uint32_t>, PGOCtxProfContext>>
PGOCtxProfileReader::readContext(bool ExpectIndex) {
RET_ON_ERR(Cursor.EnterSubBlock(PGOCtxProfileBlockIDs::ContextNodeBlockID));
@@ -125,7 +125,7 @@ PGOCtxProfileReader::readContext(bool ExpectIndex) {
}
}
- PGOContextualProfile Ret(*Guid, std::move(*Counters));
+ PGOCtxProfContext Ret(*Guid, std::move(*Counters));
while (canReadContext()) {
EXPECT_OR_RET(SC, readContext(true));
@@ -174,9 +174,9 @@ Error PGOCtxProfileReader::readMetadata() {
return Error::success();
}
-Expected<std::map<GlobalValue::GUID, PGOContextualProfile>>
+Expected<std::map<GlobalValue::GUID, PGOCtxProfContext>>
PGOCtxProfileReader::loadContexts() {
- std::map<GlobalValue::GUID, PGOContextualProfile> Ret;
+ std::map<GlobalValue::GUID, PGOCtxProfContext> Ret;
RET_ON_ERR(readMetadata());
while (canReadContext()) {
EXPECT_OR_RET(E, readContext(false));
diff --git a/llvm/lib/SandboxIR/SandboxIR.cpp b/llvm/lib/SandboxIR/SandboxIR.cpp
index 235c457..65e9d86 100644
--- a/llvm/lib/SandboxIR/SandboxIR.cpp
+++ b/llvm/lib/SandboxIR/SandboxIR.cpp
@@ -546,7 +546,8 @@ void SelectInst::dump() const {
BranchInst *BranchInst::create(BasicBlock *IfTrue, Instruction *InsertBefore,
Context &Ctx) {
auto &Builder = Ctx.getLLVMIRBuilder();
- Builder.SetInsertPoint(cast<llvm::Instruction>(InsertBefore->Val));
+ llvm::Instruction *LLVMBefore = InsertBefore->getTopmostLLVMInstruction();
+ Builder.SetInsertPoint(cast<llvm::Instruction>(LLVMBefore));
llvm::BranchInst *NewBr =
Builder.CreateBr(cast<llvm::BasicBlock>(IfTrue->Val));
return Ctx.createBranchInst(NewBr);
@@ -565,7 +566,8 @@ BranchInst *BranchInst::create(BasicBlock *IfTrue, BasicBlock *IfFalse,
Value *Cond, Instruction *InsertBefore,
Context &Ctx) {
auto &Builder = Ctx.getLLVMIRBuilder();
- Builder.SetInsertPoint(cast<llvm::Instruction>(InsertBefore->Val));
+ llvm::Instruction *LLVMBefore = InsertBefore->getTopmostLLVMInstruction();
+ Builder.SetInsertPoint(LLVMBefore);
llvm::BranchInst *NewBr =
Builder.CreateCondBr(Cond->Val, cast<llvm::BasicBlock>(IfTrue->Val),
cast<llvm::BasicBlock>(IfFalse->Val));
@@ -747,6 +749,39 @@ void StoreInst::dump() const {
}
#endif // NDEBUG
+UnreachableInst *UnreachableInst::create(Instruction *InsertBefore,
+ Context &Ctx) {
+ auto &Builder = Ctx.getLLVMIRBuilder();
+ llvm::Instruction *LLVMBefore = InsertBefore->getTopmostLLVMInstruction();
+ Builder.SetInsertPoint(LLVMBefore);
+ llvm::UnreachableInst *NewUI = Builder.CreateUnreachable();
+ return Ctx.createUnreachableInst(NewUI);
+}
+
+UnreachableInst *UnreachableInst::create(BasicBlock *InsertAtEnd,
+ Context &Ctx) {
+ auto &Builder = Ctx.getLLVMIRBuilder();
+ Builder.SetInsertPoint(cast<llvm::BasicBlock>(InsertAtEnd->Val));
+ llvm::UnreachableInst *NewUI = Builder.CreateUnreachable();
+ return Ctx.createUnreachableInst(NewUI);
+}
+
+bool UnreachableInst::classof(const Value *From) {
+ return From->getSubclassID() == ClassID::Unreachable;
+}
+
+#ifndef NDEBUG
+void UnreachableInst::dump(raw_ostream &OS) const {
+ dumpCommonPrefix(OS);
+ dumpCommonSuffix(OS);
+}
+
+void UnreachableInst::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
ReturnInst *ReturnInst::createCommon(Value *RetVal, IRBuilder<> &Builder,
Context &Ctx) {
llvm::ReturnInst *NewRI;
@@ -1145,7 +1180,6 @@ Value *PHINode::removeIncomingValue(unsigned Idx) {
auto &Tracker = Ctx.getTracker();
if (Tracker.isTracking())
Tracker.track(std::make_unique<PHIRemoveIncoming>(*this, Idx, Tracker));
-
llvm::Value *LLVMV =
cast<llvm::PHINode>(Val)->removeIncomingValue(Idx,
/*DeletePHIIfEmpty=*/false);
@@ -1177,6 +1211,27 @@ Value *PHINode::hasConstantValue() const {
llvm::Value *LLVMV = cast<llvm::PHINode>(Val)->hasConstantValue();
return LLVMV != nullptr ? Ctx.getValue(LLVMV) : nullptr;
}
+void PHINode::replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
+ assert(New && Old && "Sandbox IR PHI node got a null basic block!");
+ for (unsigned Idx = 0, NumOps = cast<llvm::PHINode>(Val)->getNumOperands();
+ Idx != NumOps; ++Idx)
+ if (getIncomingBlock(Idx) == Old)
+ setIncomingBlock(Idx, New);
+}
+void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate) {
+ // Avoid duplicate tracking by going through this->removeIncomingValue here at
+ // the expense of some performance. Copy PHI::removeIncomingValueIf more
+ // directly if performance becomes an issue.
+
+ // Removing the element at index X, moves the element previously at X + 1
+ // to X. Working from the end avoids complications from that.
+ unsigned Idx = getNumIncomingValues();
+ while (Idx > 0) {
+ if (Predicate(Idx - 1))
+ removeIncomingValue(Idx - 1);
+ --Idx;
+ }
+}
static llvm::Instruction::CastOps getLLVMCastOp(Instruction::Opcode Opc) {
switch (Opc) {
@@ -1212,6 +1267,69 @@ static llvm::Instruction::CastOps getLLVMCastOp(Instruction::Opcode Opc) {
}
}
+AllocaInst *AllocaInst::create(Type *Ty, unsigned AddrSpace, BBIterator WhereIt,
+ BasicBlock *WhereBB, Context &Ctx,
+ Value *ArraySize, const Twine &Name) {
+ auto &Builder = Ctx.getLLVMIRBuilder();
+ if (WhereIt == WhereBB->end())
+ Builder.SetInsertPoint(cast<llvm::BasicBlock>(WhereBB->Val));
+ else
+ Builder.SetInsertPoint((*WhereIt).getTopmostLLVMInstruction());
+ auto *NewAlloca = Builder.CreateAlloca(Ty, AddrSpace, ArraySize->Val, Name);
+ return Ctx.createAllocaInst(NewAlloca);
+}
+
+AllocaInst *AllocaInst::create(Type *Ty, unsigned AddrSpace,
+ Instruction *InsertBefore, Context &Ctx,
+ Value *ArraySize, const Twine &Name) {
+ return create(Ty, AddrSpace, InsertBefore->getIterator(),
+ InsertBefore->getParent(), Ctx, ArraySize, Name);
+}
+
+AllocaInst *AllocaInst::create(Type *Ty, unsigned AddrSpace,
+ BasicBlock *InsertAtEnd, Context &Ctx,
+ Value *ArraySize, const Twine &Name) {
+ return create(Ty, AddrSpace, InsertAtEnd->end(), InsertAtEnd, Ctx, ArraySize,
+ Name);
+}
+
+void AllocaInst::setAllocatedType(Type *Ty) {
+ auto &Tracker = Ctx.getTracker();
+ if (Tracker.isTracking())
+ Tracker.track(std::make_unique<AllocaSetAllocatedType>(this, Tracker));
+ cast<llvm::AllocaInst>(Val)->setAllocatedType(Ty);
+}
+
+void AllocaInst::setAlignment(Align Align) {
+ auto &Tracker = Ctx.getTracker();
+ if (Tracker.isTracking())
+ Tracker.track(std::make_unique<AllocaSetAlignment>(this, Tracker));
+ cast<llvm::AllocaInst>(Val)->setAlignment(Align);
+}
+
+void AllocaInst::setUsedWithInAlloca(bool V) {
+ auto &Tracker = Ctx.getTracker();
+ if (Tracker.isTracking())
+ Tracker.track(std::make_unique<AllocaSetUsedWithInAlloca>(this, Tracker));
+ cast<llvm::AllocaInst>(Val)->setUsedWithInAlloca(V);
+}
+
+Value *AllocaInst::getArraySize() {
+ return Ctx.getValue(cast<llvm::AllocaInst>(Val)->getArraySize());
+}
+
+#ifndef NDEBUG
+void AllocaInst::dump(raw_ostream &OS) const {
+ dumpCommonPrefix(OS);
+ dumpCommonSuffix(OS);
+}
+
+void AllocaInst::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
Value *CastInst::create(Type *DestTy, Opcode Op, Value *Operand,
BBIterator WhereIt, BasicBlock *WhereBB, Context &Ctx,
const Twine &Name) {
@@ -1452,6 +1570,11 @@ Value *Context::getOrCreateValueInternal(llvm::Value *LLVMV, llvm::User *U) {
new GetElementPtrInst(LLVMGEP, *this));
return It->second.get();
}
+ case llvm::Instruction::Alloca: {
+ auto *LLVMAlloca = cast<llvm::AllocaInst>(LLVMV);
+ It->second = std::unique_ptr<AllocaInst>(new AllocaInst(LLVMAlloca, *this));
+ return It->second.get();
+ }
case llvm::Instruction::ZExt:
case llvm::Instruction::SExt:
case llvm::Instruction::FPToUI:
@@ -1474,6 +1597,12 @@ Value *Context::getOrCreateValueInternal(llvm::Value *LLVMV, llvm::User *U) {
It->second = std::unique_ptr<PHINode>(new PHINode(LLVMPhi, *this));
return It->second.get();
}
+ case llvm::Instruction::Unreachable: {
+ auto *LLVMUnreachable = cast<llvm::UnreachableInst>(LLVMV);
+ It->second = std::unique_ptr<UnreachableInst>(
+ new UnreachableInst(LLVMUnreachable, *this));
+ return It->second.get();
+ }
default:
break;
}
@@ -1532,13 +1661,22 @@ CallBrInst *Context::createCallBrInst(llvm::CallBrInst *I) {
return cast<CallBrInst>(registerValue(std::move(NewPtr)));
}
+UnreachableInst *Context::createUnreachableInst(llvm::UnreachableInst *UI) {
+ auto NewPtr =
+ std::unique_ptr<UnreachableInst>(new UnreachableInst(UI, *this));
+ return cast<UnreachableInst>(registerValue(std::move(NewPtr)));
+}
+
GetElementPtrInst *
Context::createGetElementPtrInst(llvm::GetElementPtrInst *I) {
auto NewPtr =
std::unique_ptr<GetElementPtrInst>(new GetElementPtrInst(I, *this));
return cast<GetElementPtrInst>(registerValue(std::move(NewPtr)));
}
-
+AllocaInst *Context::createAllocaInst(llvm::AllocaInst *I) {
+ auto NewPtr = std::unique_ptr<AllocaInst>(new AllocaInst(I, *this));
+ return cast<AllocaInst>(registerValue(std::move(NewPtr)));
+}
CastInst *Context::createCastInst(llvm::CastInst *I) {
auto NewPtr = std::unique_ptr<CastInst>(new CastInst(I, *this));
return cast<CastInst>(registerValue(std::move(NewPtr)));
diff --git a/llvm/lib/SandboxIR/Tracker.cpp b/llvm/lib/SandboxIR/Tracker.cpp
index aa18c21..90c48a3 100644
--- a/llvm/lib/SandboxIR/Tracker.cpp
+++ b/llvm/lib/SandboxIR/Tracker.cpp
@@ -204,6 +204,46 @@ void RemoveFromParent::dump() const {
}
#endif
+AllocaSetAllocatedType::AllocaSetAllocatedType(AllocaInst *Alloca,
+ Tracker &Tracker)
+ : IRChangeBase(Tracker), Alloca(Alloca),
+ OrigType(Alloca->getAllocatedType()) {}
+
+void AllocaSetAllocatedType::revert() { Alloca->setAllocatedType(OrigType); }
+
+#ifndef NDEBUG
+void AllocaSetAllocatedType::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
+AllocaSetAlignment::AllocaSetAlignment(AllocaInst *Alloca, Tracker &Tracker)
+ : IRChangeBase(Tracker), Alloca(Alloca), OrigAlign(Alloca->getAlign()) {}
+
+void AllocaSetAlignment::revert() { Alloca->setAlignment(OrigAlign); }
+
+#ifndef NDEBUG
+void AllocaSetAlignment::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
+AllocaSetUsedWithInAlloca::AllocaSetUsedWithInAlloca(AllocaInst *Alloca,
+ Tracker &Tracker)
+ : IRChangeBase(Tracker), Alloca(Alloca),
+ Orig(Alloca->isUsedWithInAlloca()) {}
+
+void AllocaSetUsedWithInAlloca::revert() { Alloca->setUsedWithInAlloca(Orig); }
+
+#ifndef NDEBUG
+void AllocaSetUsedWithInAlloca::dump() const {
+ dump(dbgs());
+ dbgs() << "\n";
+}
+#endif // NDEBUG
+
CallBrInstSetDefaultDest::CallBrInstSetDefaultDest(CallBrInst *CallBr,
Tracker &Tracker)
: IRChangeBase(Tracker), CallBr(CallBr) {
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index b51c056..040a47f 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -113,6 +113,8 @@ public:
void emitFunctionEntryLabel() override;
+ void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
+
void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
void LowerHardenedBRJumpTable(const MachineInstr &MI);
@@ -1280,6 +1282,23 @@ void AArch64AsmPrinter::emitFunctionEntryLabel() {
}
}
+void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
+ const Constant *CV) {
+ if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
+ if (CPA->hasAddressDiscriminator() &&
+ !CPA->hasSpecialAddressDiscriminator(
+ ConstantPtrAuth::AddrDiscriminator_CtorsDtors))
+ report_fatal_error(
+ "unexpected address discrimination value for ctors/dtors entry, only "
+ "'ptr inttoptr (i64 1 to ptr)' is allowed");
+ // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
+ // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
+ // actual address discrimination value and only checks
+ // hasAddressDiscriminator(), so it's OK to leave special address
+ // discrimination value here.
+ AsmPrinter::emitXXStructor(DL, CV);
+}
+
void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
const GlobalAlias &GA) {
if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
@@ -2142,6 +2161,10 @@ void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
};
const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
+ const bool IsELFSignedGOT = MI.getParent()
+ ->getParent()
+ ->getInfo<AArch64FunctionInfo>()
+ ->hasELFSignedGOT();
MachineOperand GAOp = MI.getOperand(0);
const uint64_t KeyC = MI.getOperand(1).getImm();
assert(KeyC <= AArch64PACKey::LAST &&
@@ -2158,9 +2181,16 @@ void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
// Emit:
// target materialization:
// - via GOT:
- // adrp x16, :got:target
- // ldr x16, [x16, :got_lo12:target]
- // add offset to x16 if offset != 0
+ // - unsigned GOT:
+ // adrp x16, :got:target
+ // ldr x16, [x16, :got_lo12:target]
+ // add offset to x16 if offset != 0
+ // - ELF signed GOT:
+ // adrp x17, :got:target
+ // add x17, x17, :got_auth_lo12:target
+ // ldr x16, [x17]
+ // aut{i|d}a x16, x17
+ // add offset to x16 if offset != 0
//
// - direct:
// adrp x16, target
@@ -2203,13 +2233,40 @@ void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
EmitAndIncrement(
- MCInstBuilder(AArch64::ADRP).addReg(AArch64::X16).addOperand(GAMCHi));
+ MCInstBuilder(AArch64::ADRP)
+ .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
+ .addOperand(GAMCHi));
if (IsGOTLoad) {
- EmitAndIncrement(MCInstBuilder(AArch64::LDRXui)
- .addReg(AArch64::X16)
- .addReg(AArch64::X16)
- .addOperand(GAMCLo));
+ if (IsELFSignedGOT) {
+ EmitAndIncrement(MCInstBuilder(AArch64::ADDXri)
+ .addReg(AArch64::X17)
+ .addReg(AArch64::X17)
+ .addOperand(GAMCLo)
+ .addImm(0));
+
+ EmitAndIncrement(MCInstBuilder(AArch64::LDRXui)
+ .addReg(AArch64::X16)
+ .addReg(AArch64::X17)
+ .addImm(0));
+
+ assert(GAOp.isGlobal());
+ assert(GAOp.getGlobal()->getValueType() != nullptr);
+ unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
+ ? AArch64::AUTIA
+ : AArch64::AUTDA;
+
+ EmitAndIncrement(MCInstBuilder(AuthOpcode)
+ .addReg(AArch64::X16)
+ .addReg(AArch64::X16)
+ .addReg(AArch64::X17));
+
+ } else {
+ EmitAndIncrement(MCInstBuilder(AArch64::LDRXui)
+ .addReg(AArch64::X16)
+ .addReg(AArch64::X16)
+ .addOperand(GAMCLo));
+ }
} else {
EmitAndIncrement(MCInstBuilder(AArch64::ADDXri)
.addReg(AArch64::X16)
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 2bc14f9..161cf24 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -108,6 +108,10 @@ static bool atomicReadDroppedOnZero(unsigned Opcode) {
case AArch64::LDUMINW: case AArch64::LDUMINX:
case AArch64::LDUMINLB: case AArch64::LDUMINLH:
case AArch64::LDUMINLW: case AArch64::LDUMINLX:
+ case AArch64::SWPB: case AArch64::SWPH:
+ case AArch64::SWPW: case AArch64::SWPX:
+ case AArch64::SWPLB: case AArch64::SWPLH:
+ case AArch64::SWPLW: case AArch64::SWPLX:
return true;
}
return false;
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 9b7fc22..72c7672 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1291,7 +1291,40 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
MI.eraseFromParent();
return true;
}
+ case AArch64::LOADgotAUTH: {
+ Register DstReg = MI.getOperand(0).getReg();
+ const MachineOperand &MO1 = MI.getOperand(1);
+
+ MachineOperand GAHiOp(MO1);
+ MachineOperand GALoOp(MO1);
+ GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
+ GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+
+ DebugLoc DL = MI.getDebugLoc();
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), AArch64::X16)
+ .add(GAHiOp);
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X16)
+ .addReg(AArch64::X16)
+ .add(GALoOp)
+ .addImm(0);
+
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui), DstReg)
+ .addReg(AArch64::X16)
+ .addImm(0);
+
+ assert(MO1.isGlobal());
+ assert(MO1.getGlobal()->getValueType() != nullptr);
+ unsigned AuthOpcode = MO1.getGlobal()->getValueType()->isFunctionTy()
+ ? AArch64::AUTIA
+ : AArch64::AUTDA;
+ BuildMI(MBB, MBBI, DL, TII->get(AuthOpcode), DstReg)
+ .addReg(DstReg)
+ .addReg(AArch64::X16);
+
+ MI.eraseFromParent();
+ return true;
+ }
case AArch64::LOADgot: {
MachineFunction *MF = MBB.getParent();
Register DstReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index cbf38f2..4487d34a 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -453,6 +453,9 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
if (!Subtarget->useSmallAddressing() && !Subtarget->isTargetMachO())
return 0;
+ if (FuncInfo.MF->getInfo<AArch64FunctionInfo>()->hasELFSignedGOT())
+ return 0;
+
unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);
EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);
diff --git a/llvm/lib/Target/AArch64/AArch64Features.td b/llvm/lib/Target/AArch64/AArch64Features.td
index a1ae087..2aa74de 100644
--- a/llvm/lib/Target/AArch64/AArch64Features.td
+++ b/llvm/lib/Target/AArch64/AArch64Features.td
@@ -186,7 +186,7 @@ def FeatureJS : ExtensionWithMArch<"jsconv", "JS", "FEAT_JSCVT",
[FeatureFPARMv8]>;
def FeatureFPAC : Extension<"fpac", "FPAC", "FEAT_FPAC",
- "Enable v8.3-A Pointer Authentication Faulting enhancement">;
+ "Enable Armv8.3-A Pointer Authentication Faulting enhancement">;
def FeatureCCIDX : Extension<"ccidx", "CCIDX", "FEAT_CCIDX",
"Enable Armv8.3-A Extend of the CCSIDR number of sets">;
@@ -435,8 +435,11 @@ def FeatureMEC : Extension<"mec", "MEC", "FEAT_MEC",
def FeatureSVE2p1: ExtensionWithMArch<"sve2p1", "SVE2p1", "FEAT_SVE2p1",
"Enable Scalable Vector Extension 2.1 instructions", [FeatureSVE2]>;
-def FeatureB16B16 : ExtensionWithMArch<"b16b16", "B16B16", "FEAT_SVE_B16B16",
- "Enable SVE2.1 or SME2.1 non-widening BFloat16 to BFloat16 instructions", [FeatureBF16]>;
+def FeatureB16B16 : ExtensionWithMArch<"b16b16", "B16B16", "FEAT_B16B16",
+ "Enable SME2.1 ZA-targeting non-widening BFloat16 to BFloat16 instructions", [FeatureBF16]>;
+
+def FeatureSVEB16B16: ExtensionWithMArch<"sve-b16b16", "SVEB16B16", "FEAT_SVE_B16B16",
+ "Enable SVE2.1 non-widening and SME2.1 Z-targeting non-widening BFloat16 to BFloat16 instructions">;
def FeatureSMEF16F16 : ExtensionWithMArch<"sme-f16f16", "SMEF16F16", "FEAT_SME_F16F16",
"Enable SME non-widening Float16 instructions", [FeatureSME2]>;
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index f28511c..bf0eb14 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -240,6 +240,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
@@ -275,6 +276,10 @@ cl::opt<bool> EnableHomogeneousPrologEpilog(
// Stack hazard padding size. 0 = disabled.
static cl::opt<unsigned> StackHazardSize("aarch64-stack-hazard-size",
cl::init(0), cl::Hidden);
+// Stack hazard size for analysis remarks. StackHazardSize takes precedence.
+static cl::opt<unsigned>
+ StackHazardRemarkSize("aarch64-stack-hazard-remark-size", cl::init(0),
+ cl::Hidden);
// Whether to insert padding into non-streaming functions (for testing).
static cl::opt<bool>
StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming",
@@ -2616,9 +2621,16 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
const auto &MFI = MF.getFrameInfo();
int64_t ObjectOffset = MFI.getObjectOffset(FI);
+ StackOffset SVEStackSize = getSVEStackSize(MF);
+
+ // For VLA-area objects, just emit an offset at the end of the stack frame.
+ // Whilst not quite correct, these objects do live at the end of the frame and
+ // so it is more useful for analysis for the offset to reflect this.
+ if (MFI.isVariableSizedObjectIndex(FI)) {
+ return StackOffset::getFixed(-((int64_t)MFI.getStackSize())) - SVEStackSize;
+ }
// This is correct in the absence of any SVE stack objects.
- StackOffset SVEStackSize = getSVEStackSize(MF);
if (!SVEStackSize)
return StackOffset::getFixed(ObjectOffset - getOffsetOfLocalArea());
@@ -3529,13 +3541,9 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
return true;
}
-// Return the FrameID for a Load/Store instruction by looking at the MMO.
-static std::optional<int> getLdStFrameID(const MachineInstr &MI,
- const MachineFrameInfo &MFI) {
- if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
- return std::nullopt;
-
- MachineMemOperand *MMO = *MI.memoperands_begin();
+// Return the FrameID for a MMO.
+static std::optional<int> getMMOFrameID(MachineMemOperand *MMO,
+ const MachineFrameInfo &MFI) {
auto *PSV =
dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue());
if (PSV)
@@ -3553,6 +3561,15 @@ static std::optional<int> getLdStFrameID(const MachineInstr &MI,
return std::nullopt;
}
+// Return the FrameID for a Load/Store instruction by looking at the first MMO.
+static std::optional<int> getLdStFrameID(const MachineInstr &MI,
+ const MachineFrameInfo &MFI) {
+ if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
+ return std::nullopt;
+
+ return getMMOFrameID(*MI.memoperands_begin(), MFI);
+}
+
// Check if a Hazard slot is needed for the current function, and if so create
// one for it. The index is stored in AArch64FunctionInfo->StackHazardSlotIndex,
// which can be used to determine if any hazard padding is needed.
@@ -5030,3 +5047,174 @@ void AArch64FrameLowering::inlineStackProbe(MachineFunction &MF,
MI->eraseFromParent();
}
}
+
+struct StackAccess {
+ enum AccessType {
+ NotAccessed = 0, // Stack object not accessed by load/store instructions.
+ GPR = 1 << 0, // A general purpose register.
+ PPR = 1 << 1, // A predicate register.
+ FPR = 1 << 2, // A floating point/Neon/SVE register.
+ };
+
+ int Idx;
+ StackOffset Offset;
+ int64_t Size;
+ unsigned AccessTypes;
+
+ StackAccess() : Idx(0), Offset(), Size(0), AccessTypes(NotAccessed) {}
+
+ bool operator<(const StackAccess &Rhs) const {
+ return std::make_tuple(start(), Idx) <
+ std::make_tuple(Rhs.start(), Rhs.Idx);
+ }
+
+ bool isCPU() const {
+ // Predicate register load and store instructions execute on the CPU.
+ return AccessTypes & (AccessType::GPR | AccessType::PPR);
+ }
+ bool isSME() const { return AccessTypes & AccessType::FPR; }
+ bool isMixed() const { return isCPU() && isSME(); }
+
+ int64_t start() const { return Offset.getFixed() + Offset.getScalable(); }
+ int64_t end() const { return start() + Size; }
+
+ std::string getTypeString() const {
+ switch (AccessTypes) {
+ case AccessType::FPR:
+ return "FPR";
+ case AccessType::PPR:
+ return "PPR";
+ case AccessType::GPR:
+ return "GPR";
+ case AccessType::NotAccessed:
+ return "NA";
+ default:
+ return "Mixed";
+ }
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << getTypeString() << " stack object at [SP"
+ << (Offset.getFixed() < 0 ? "" : "+") << Offset.getFixed();
+ if (Offset.getScalable())
+ OS << (Offset.getScalable() < 0 ? "" : "+") << Offset.getScalable()
+ << " * vscale";
+ OS << "]";
+ }
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS, const StackAccess &SA) {
+ SA.print(OS);
+ return OS;
+}
+
+void AArch64FrameLowering::emitRemarks(
+ const MachineFunction &MF, MachineOptimizationRemarkEmitter *ORE) const {
+
+ SMEAttrs Attrs(MF.getFunction());
+ if (Attrs.hasNonStreamingInterfaceAndBody())
+ return;
+
+ const uint64_t HazardSize =
+ (StackHazardSize) ? StackHazardSize : StackHazardRemarkSize;
+
+ if (HazardSize == 0)
+ return;
+
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ // Bail if function has no stack objects.
+ if (!MFI.hasStackObjects())
+ return;
+
+ std::vector<StackAccess> StackAccesses(MFI.getNumObjects());
+
+ size_t NumFPLdSt = 0;
+ size_t NumNonFPLdSt = 0;
+
+ // Collect stack accesses via Load/Store instructions.
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
+ continue;
+ for (MachineMemOperand *MMO : MI.memoperands()) {
+ std::optional<int> FI = getMMOFrameID(MMO, MFI);
+ if (FI && !MFI.isDeadObjectIndex(*FI)) {
+ int FrameIdx = *FI;
+
+ size_t ArrIdx = FrameIdx + MFI.getNumFixedObjects();
+ if (StackAccesses[ArrIdx].AccessTypes == StackAccess::NotAccessed) {
+ StackAccesses[ArrIdx].Idx = FrameIdx;
+ StackAccesses[ArrIdx].Offset =
+ getFrameIndexReferenceFromSP(MF, FrameIdx);
+ StackAccesses[ArrIdx].Size = MFI.getObjectSize(FrameIdx);
+ }
+
+ unsigned RegTy = StackAccess::AccessType::GPR;
+ if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+ if (AArch64::PPRRegClass.contains(MI.getOperand(0).getReg()))
+ RegTy = StackAccess::PPR;
+ else
+ RegTy = StackAccess::FPR;
+ } else if (AArch64InstrInfo::isFpOrNEON(MI)) {
+ RegTy = StackAccess::FPR;
+ }
+
+ StackAccesses[ArrIdx].AccessTypes |= RegTy;
+
+ if (RegTy == StackAccess::FPR)
+ ++NumFPLdSt;
+ else
+ ++NumNonFPLdSt;
+ }
+ }
+ }
+ }
+
+ if (NumFPLdSt == 0 || NumNonFPLdSt == 0)
+ return;
+
+ llvm::sort(StackAccesses);
+ StackAccesses.erase(llvm::remove_if(StackAccesses,
+ [](const StackAccess &S) {
+ return S.AccessTypes ==
+ StackAccess::NotAccessed;
+ }),
+ StackAccesses.end());
+
+ SmallVector<const StackAccess *> MixedObjects;
+ SmallVector<std::pair<const StackAccess *, const StackAccess *>> HazardPairs;
+
+ if (StackAccesses.front().isMixed())
+ MixedObjects.push_back(&StackAccesses.front());
+
+ for (auto It = StackAccesses.begin(), End = std::prev(StackAccesses.end());
+ It != End; ++It) {
+ const auto &First = *It;
+ const auto &Second = *(It + 1);
+
+ if (Second.isMixed())
+ MixedObjects.push_back(&Second);
+
+ if ((First.isSME() && Second.isCPU()) ||
+ (First.isCPU() && Second.isSME())) {
+ uint64_t Distance = static_cast<uint64_t>(Second.start() - First.end());
+ if (Distance < HazardSize)
+ HazardPairs.emplace_back(&First, &Second);
+ }
+ }
+
+ auto EmitRemark = [&](llvm::StringRef Str) {
+ ORE->emit([&]() {
+ auto R = MachineOptimizationRemarkAnalysis(
+ "sme", "StackHazard", MF.getFunction().getSubprogram(), &MF.front());
+ return R << formatv("stack hazard in '{0}': ", MF.getName()).str() << Str;
+ });
+ };
+
+ for (const auto &P : HazardPairs)
+ EmitRemark(formatv("{0} is too close to {1}", *P.first, *P.second).str());
+
+ for (const auto *Obj : MixedObjects)
+ EmitRemark(
+ formatv("{0} accessed by both GP and FP instructions", *Obj).str());
+}
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 0ebab17..c197312 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -13,8 +13,9 @@
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
-#include "llvm/Support/TypeSize.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Support/TypeSize.h"
namespace llvm {
@@ -178,6 +179,9 @@ private:
inlineStackProbeLoopExactMultiple(MachineBasicBlock::iterator MBBI,
int64_t NegProbeSize,
Register TargetReg) const;
+
+ void emitRemarks(const MachineFunction &MF,
+ MachineOptimizationRemarkEmitter *ORE) const override;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7704321..9413073 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9225,6 +9225,11 @@ SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
// FIXME: Once remat is capable of dealing with instructions with register
// operands, expand this into two nodes instead of using a wrapper node.
+ if (DAG.getMachineFunction()
+ .getInfo<AArch64FunctionInfo>()
+ ->hasELFSignedGOT())
+ return SDValue(DAG.getMachineNode(AArch64::LOADgotAUTH, DL, Ty, GotAddr),
+ 0);
return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
}
@@ -21769,6 +21774,7 @@ static SDValue performExtendCombine(SDNode *N,
// helps the backend to decide that an sabdl2 would be useful, saving a real
// extract_high operation.
if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
+ N->getOperand(0).getValueType().is64BitVector() &&
(N->getOperand(0).getOpcode() == ISD::ABDU ||
N->getOperand(0).getOpcode() == ISD::ABDS)) {
SDNode *ABDNode = N->getOperand(0).getNode();
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index e720c6b..1e5c5e2 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -143,6 +143,8 @@ def HasFuseAES : Predicate<"Subtarget->hasFuseAES()">,
"fuse-aes">;
def HasSVE : Predicate<"Subtarget->isSVEAvailable()">,
AssemblerPredicateWithAll<(all_of FeatureSVE), "sve">;
+def HasSVEB16B16 : Predicate<"Subtarget->hasSVEB16B16()">,
+ AssemblerPredicateWithAll<(all_of FeatureSVEB16B16), "sve-b16b16">;
def HasSVE2 : Predicate<"Subtarget->isSVEAvailable() && Subtarget->hasSVE2()">,
AssemblerPredicateWithAll<(all_of FeatureSVE2), "sve2">;
def HasSVE2p1 : Predicate<"Subtarget->isSVEAvailable() && Subtarget->hasSVE2p1()">,
@@ -1872,7 +1874,7 @@ let Predicates = [HasPAuth] in {
Sched<[WriteI, ReadI]> {
let isReMaterializable = 1;
let isCodeGenOnly = 1;
- let Size = 40; // 12 fixed + 28 variable, for pointer offset, and discriminator
+ let Size = 48; // 12 fixed + 36 variable, for pointer offset, and discriminator
let Defs = [X16,X17];
}
@@ -1911,6 +1913,11 @@ let Predicates = [HasPAuth] in {
tcGPR64:$AddrDisc),
(AUTH_TCRETURN_BTI tcGPRx16x17:$dst, imm:$FPDiff, imm:$Key,
imm:$Disc, tcGPR64:$AddrDisc)>;
+
+ def LOADgotAUTH : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr), []>,
+ Sched<[WriteI, ReadI]> {
+ let Defs = [X16];
+ }
}
// v9.5-A pointer authentication extensions
diff --git a/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp b/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
index 4867224..9f234b0 100644
--- a/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "AArch64MCInstLower.h"
+#include "AArch64MachineFunctionInfo.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -185,9 +186,12 @@ MCOperand AArch64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
MCSymbol *Sym) const {
uint32_t RefFlags = 0;
- if (MO.getTargetFlags() & AArch64II::MO_GOT)
- RefFlags |= AArch64MCExpr::VK_GOT;
- else if (MO.getTargetFlags() & AArch64II::MO_TLS) {
+ if (MO.getTargetFlags() & AArch64II::MO_GOT) {
+ const MachineFunction *MF = MO.getParent()->getParent()->getParent();
+ RefFlags |= (MF->getInfo<AArch64FunctionInfo>()->hasELFSignedGOT()
+ ? AArch64MCExpr::VK_GOT_AUTH
+ : AArch64MCExpr::VK_GOT);
+ } else if (MO.getTargetFlags() & AArch64II::MO_TLS) {
TLSModel::Model Model;
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
index e96c5a9..a0f0a48 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
@@ -16,6 +16,7 @@
#include "AArch64MachineFunctionInfo.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
@@ -72,6 +73,29 @@ static bool ShouldSignWithBKey(const Function &F, const AArch64Subtarget &STI) {
return Key == "b_key";
}
+// Determine if we need to treat pointers in GOT as signed (as described in
+// https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#appendix-signed-got)
+// based on PAuth core info encoded as "aarch64-elf-pauthabi-platform" and
+// "aarch64-elf-pauthabi-version" module flags. Currently, only
+// AARCH64_PAUTH_PLATFORM_LLVM_LINUX platform supports signed GOT with
+// AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT bit in version value set.
+static bool hasELFSignedGOTHelper(const Function &F,
+ const AArch64Subtarget *STI) {
+ if (!Triple(STI->getTargetTriple()).isOSBinFormatELF())
+ return false;
+ const Module *M = F.getParent();
+ const auto *PAP = mdconst::extract_or_null<ConstantInt>(
+ M->getModuleFlag("aarch64-elf-pauthabi-platform"));
+ if (!PAP || PAP->getZExtValue() != ELF::AARCH64_PAUTH_PLATFORM_LLVM_LINUX)
+ return false;
+ const auto *PAV = mdconst::extract_or_null<ConstantInt>(
+ M->getModuleFlag("aarch64-elf-pauthabi-version"));
+ if (!PAV)
+ return false;
+ return PAV->getZExtValue() &
+ (1 << ELF::AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT);
+}
+
AArch64FunctionInfo::AArch64FunctionInfo(const Function &F,
const AArch64Subtarget *STI) {
// If we already know that the function doesn't have a redzone, set
@@ -80,6 +104,7 @@ AArch64FunctionInfo::AArch64FunctionInfo(const Function &F,
HasRedZone = false;
std::tie(SignReturnAddress, SignReturnAddressAll) = GetSignReturnAddress(F);
SignWithBKey = ShouldSignWithBKey(F, *STI);
+ HasELFSignedGOT = hasELFSignedGOTHelper(F, STI);
// TODO: skip functions that have no instrumented allocas for optimization
IsMTETagged = F.hasFnAttribute(Attribute::SanitizeMemTag);
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 72f110c..9ae4584 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -177,6 +177,14 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
/// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
bool SignWithBKey = false;
+ /// HasELFSignedGOT is true if the target binary format is ELF and the IR
+ /// module containing the corresponding function has the following flags:
+ /// - aarch64-elf-pauthabi-platform flag equal to
+ /// AARCH64_PAUTH_PLATFORM_LLVM_LINUX;
+ /// - aarch64-elf-pauthabi-version flag with
+ /// AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT bit set.
+ bool HasELFSignedGOT = false;
+
/// SigningInstrOffset captures the offset of the PAC-RET signing instruction
/// within the prologue, so it can be re-used for authentication in the
/// epilogue when using PC as a second salt (FEAT_PAuth_LR)
@@ -509,6 +517,8 @@ public:
bool shouldSignWithBKey() const { return SignWithBKey; }
+ bool hasELFSignedGOT() const { return HasELFSignedGOT; }
+
MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
diff --git a/llvm/lib/Target/AArch64/AArch64Processors.td b/llvm/lib/Target/AArch64/AArch64Processors.td
index 71384a2..52b5c8a 100644
--- a/llvm/lib/Target/AArch64/AArch64Processors.td
+++ b/llvm/lib/Target/AArch64/AArch64Processors.td
@@ -863,22 +863,25 @@ def ProcessorFeatures {
list<SubtargetFeature> AppleA15 = [HasV8_6aOps, FeatureSHA2, FeatureAES, FeatureFPARMv8,
FeatureNEON, FeaturePerfMon, FeatureSHA3,
FeatureFullFP16, FeatureFP16FML,
- FeatureComplxNum, FeatureCRC, FeatureJS, FeatureLSE,
- FeaturePAuth, FeatureRAS, FeatureRCPC, FeatureRDM,
+ FeatureComplxNum, FeatureCRC, FeatureJS,
+ FeatureLSE, FeaturePAuth, FeatureFPAC,
+ FeatureRAS, FeatureRCPC, FeatureRDM,
FeatureBF16, FeatureDotProd, FeatureMatMulInt8];
list<SubtargetFeature> AppleA16 = [HasV8_6aOps, FeatureSHA2, FeatureAES, FeatureFPARMv8,
FeatureNEON, FeaturePerfMon, FeatureSHA3,
FeatureFullFP16, FeatureFP16FML,
FeatureHCX,
- FeatureComplxNum, FeatureCRC, FeatureJS, FeatureLSE,
- FeaturePAuth, FeatureRAS, FeatureRCPC, FeatureRDM,
+ FeatureComplxNum, FeatureCRC, FeatureJS,
+ FeatureLSE, FeaturePAuth, FeatureFPAC,
+ FeatureRAS, FeatureRCPC, FeatureRDM,
FeatureBF16, FeatureDotProd, FeatureMatMulInt8];
list<SubtargetFeature> AppleA17 = [HasV8_6aOps, FeatureSHA2, FeatureAES, FeatureFPARMv8,
FeatureNEON, FeaturePerfMon, FeatureSHA3,
FeatureFullFP16, FeatureFP16FML,
FeatureHCX,
- FeatureComplxNum, FeatureCRC, FeatureJS, FeatureLSE,
- FeaturePAuth, FeatureRAS, FeatureRCPC, FeatureRDM,
+ FeatureComplxNum, FeatureCRC, FeatureJS,
+ FeatureLSE, FeaturePAuth, FeatureFPAC,
+ FeatureRAS, FeatureRCPC, FeatureRDM,
FeatureBF16, FeatureDotProd, FeatureMatMulInt8];
list<SubtargetFeature> AppleM4 = [HasV9_2aOps, FeatureSHA2, FeatureFPARMv8,
FeatureNEON, FeaturePerfMon, FeatureSHA3,
@@ -886,8 +889,9 @@ def ProcessorFeatures {
FeatureAES, FeatureBF16,
FeatureSME, FeatureSME2,
FeatureSMEF64F64, FeatureSMEI16I64,
- FeatureComplxNum, FeatureCRC, FeatureJS, FeatureLSE,
- FeaturePAuth, FeatureRAS, FeatureRCPC, FeatureRDM,
+ FeatureComplxNum, FeatureCRC, FeatureJS,
+ FeatureLSE, FeaturePAuth, FeatureFPAC,
+ FeatureRAS, FeatureRCPC, FeatureRDM,
FeatureDotProd, FeatureMatMulInt8];
list<SubtargetFeature> ExynosM3 = [HasV8_0aOps, FeatureCRC, FeatureSHA2, FeatureAES,
FeaturePerfMon, FeatureNEON, FeatureFPARMv8];
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index 709a98d..22de9e1 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -857,6 +857,7 @@ defm FMOPA_MPPZZ_H : sme2p1_fmop_tile_fp16<"fmopa", 0b0, 0b0, nxv8f16, int_aarch
defm FMOPS_MPPZZ_H : sme2p1_fmop_tile_fp16<"fmops", 0b0, 0b1, nxv8f16, int_aarch64_sme_mops>;
}
+// SME2 ZA-targeting non-widening BFloat16 instructions
let Predicates = [HasSME2, HasB16B16] in {
defm BFADD_VG2_M2Z_H : sme2_multivec_accum_add_sub_vg2<"bfadd", 0b1100, MatrixOp16, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_add_za16_vg1x2>;
defm BFADD_VG4_M4Z_H : sme2_multivec_accum_add_sub_vg4<"bfadd", 0b1100, MatrixOp16, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_add_za16_vg1x4>;
@@ -877,6 +878,12 @@ defm BFMLS_VG4_M4ZZ : sme2_dot_mla_add_sub_array_vg4_single<"bfmls", 0b1111101,
defm BFMLS_VG2_M2Z2Z : sme2_dot_mla_add_sub_array_vg2_multi<"bfmls", 0b1100011, MatrixOp16, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmls_vg1x2>;
defm BFMLS_VG4_M4Z4Z : sme2_dot_mla_add_sub_array_vg4_multi<"bfmls", 0b1100011, MatrixOp16, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmls_vg1x4>;
+defm BFMOPA_MPPZZ_H : sme2p1_fmop_tile_fp16<"bfmopa", 0b1, 0b0, nxv8bf16, int_aarch64_sme_mopa>;
+defm BFMOPS_MPPZZ_H : sme2p1_fmop_tile_fp16<"bfmops", 0b1, 0b1, nxv8bf16, int_aarch64_sme_mops>;
+}
+
+// SME2 Z-targeting non-widening BFloat16 instructions
+let Predicates = [HasSME2, HasSVEB16B16] in {
defm BFMAX_VG2_2ZZ : sme2p1_bf_max_min_vector_vg2_single<"bfmax", 0b0010000>;
defm BFMAX_VG4_4ZZ : sme2p1_bf_max_min_vector_vg4_single<"bfmax", 0b0010000>;
defm BFMAX_VG2_2Z2Z : sme2p1_bf_max_min_vector_vg2_multi<"bfmax", 0b0010000>;
@@ -899,9 +906,6 @@ defm BFMINNM_VG4_4Z2Z : sme2p1_bf_max_min_vector_vg4_multi<"bfminnm", 0b0010011
defm BFCLAMP_VG2_2ZZZ: sme2p1_bfclamp_vector_vg2_multi<"bfclamp">;
defm BFCLAMP_VG4_4ZZZ: sme2p1_bfclamp_vector_vg4_multi<"bfclamp">;
-
-defm BFMOPA_MPPZZ_H : sme2p1_fmop_tile_fp16<"bfmopa", 0b1, 0b0, nxv8bf16, int_aarch64_sme_mopa>;
-defm BFMOPS_MPPZZ_H : sme2p1_fmop_tile_fp16<"bfmops", 0b1, 0b1, nxv8bf16, int_aarch64_sme_mops>;
}
let Predicates = [HasSME2, HasFP8] in {
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 19c0301..d9a70b5 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -279,6 +279,10 @@ def AArch64fsub_m1 : PatFrags<(ops node:$pg, node:$op1, node:$op2), [
(int_aarch64_sve_fsub node:$pg, node:$op1, node:$op2),
(vselect node:$pg, (AArch64fsub_p (SVEAllActive), node:$op1, node:$op2), node:$op1)
]>;
+def AArch64fsubr_m1 : PatFrags<(ops node:$pg, node:$op1, node:$op2), [
+ (int_aarch64_sve_fsubr node:$pg, node:$op1, node:$op2),
+ (vselect node:$pg, (AArch64fsub_p (SVEAllActive), node:$op2, node:$op1), node:$op1)
+]>;
def AArch64shadd : PatFrags<(ops node:$pg, node:$op1, node:$op2),
[(int_aarch64_sve_shadd node:$pg, node:$op1, node:$op2),
@@ -423,6 +427,11 @@ def AArch64bic : PatFrags<(ops node:$op1, node:$op2),
def AArch64subr : PatFrag<(ops node:$op1, node:$op2),
(sub node:$op2, node:$op1)>;
+
+def AArch64subr_m1 : PatFrags<(ops node:$pg, node:$op1, node:$op2),
+ [(int_aarch64_sve_subr node:$pg, node:$op1, node:$op2),
+ (vselect node:$pg, (sub node:$op2, node:$op1), node:$op1)]>;
+
def AArch64mla_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
[(int_aarch64_sve_mla node:$pred, node:$op1, node:$op2, node:$op3),
(vselect node:$pred, (add node:$op1, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3)), node:$op1)]>;
@@ -529,7 +538,7 @@ let Predicates = [HasSVEorSME] in {
defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", "ADD_ZPZZ", AArch64add_m1, DestructiveBinaryComm>;
defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", AArch64sub_m1, DestructiveBinaryCommWithRev, "SUBR_ZPmZ">;
- defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", "SUBR_ZPZZ", int_aarch64_sve_subr, DestructiveBinaryCommWithRev, "SUB_ZPmZ", /*isReverseInstr*/ 1>;
+ defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", "SUBR_ZPZZ", AArch64subr_m1, DestructiveBinaryCommWithRev, "SUB_ZPmZ", /*isReverseInstr*/ 1>;
defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr", "ORR_ZPZZ", AArch64orr_m1, DestructiveBinaryComm>;
defm EOR_ZPmZ : sve_int_bin_pred_log<0b001, "eor", "EOR_ZPZZ", AArch64eor_m1, DestructiveBinaryComm>;
@@ -685,7 +694,7 @@ let Predicates = [HasSVEorSME] in {
defm FADD_ZPmZ : sve_fp_2op_p_zds<0b0000, "fadd", "FADD_ZPZZ", AArch64fadd_m1, DestructiveBinaryComm>;
defm FSUB_ZPmZ : sve_fp_2op_p_zds<0b0001, "fsub", "FSUB_ZPZZ", AArch64fsub_m1, DestructiveBinaryCommWithRev, "FSUBR_ZPmZ">;
defm FMUL_ZPmZ : sve_fp_2op_p_zds<0b0010, "fmul", "FMUL_ZPZZ", AArch64fmul_m1, DestructiveBinaryComm>;
- defm FSUBR_ZPmZ : sve_fp_2op_p_zds<0b0011, "fsubr", "FSUBR_ZPZZ", int_aarch64_sve_fsubr, DestructiveBinaryCommWithRev, "FSUB_ZPmZ", /*isReverseInstr*/ 1>;
+ defm FSUBR_ZPmZ : sve_fp_2op_p_zds<0b0011, "fsubr", "FSUBR_ZPZZ", AArch64fsubr_m1, DestructiveBinaryCommWithRev, "FSUB_ZPmZ", /*isReverseInstr*/ 1>;
defm FMAXNM_ZPmZ : sve_fp_2op_p_zds<0b0100, "fmaxnm", "FMAXNM_ZPZZ", AArch64fmaxnm_m1, DestructiveBinaryComm>;
defm FMINNM_ZPmZ : sve_fp_2op_p_zds<0b0101, "fminnm", "FMINNM_ZPZZ", AArch64fminnm_m1, DestructiveBinaryComm>;
defm FMAX_ZPmZ : sve_fp_2op_p_zds<0b0110, "fmax", "FMAX_ZPZZ", AArch64fmax_m1, DestructiveBinaryComm>;
@@ -4101,7 +4110,7 @@ def : InstAlias<"pfalse\t$Pd", (PFALSE PPRorPNR8:$Pd), 0>;
// Non-widening BFloat16 to BFloat16 instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasSVE2orSME2, HasB16B16, UseExperimentalZeroingPseudos] in {
+let Predicates = [HasSVE2orSME2, HasSVEB16B16, UseExperimentalZeroingPseudos] in {
defm BFADD_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fadd>;
defm BFSUB_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fsub>;
defm BFMUL_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fmul>;
@@ -4109,9 +4118,9 @@ defm BFMAXNM_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fmaxnm>;
defm BFMINNM_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fminnm>;
defm BFMIN_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fmin>;
defm BFMAX_ZPZZ : sve2p1_bf_2op_p_zds_zeroing<int_aarch64_sve_fmax>;
-} // HasSVE2orSME2, HasB16B16, UseExperimentalZeroingPseudos
+} // HasSVE2orSME2, HasSVEB16B16, UseExperimentalZeroingPseudos
-let Predicates = [HasSVE2orSME2, HasB16B16] in {
+let Predicates = [HasSVE2orSME2, HasSVEB16B16] in {
defm BFMLA_ZPmZZ : sve_fp_3op_p_zds_a_bf<0b00, "bfmla", "BFMLA_ZPZZZ", AArch64fmla_m1>;
defm BFMLS_ZPmZZ : sve_fp_3op_p_zds_a_bf<0b01, "bfmls", "BFMLS_ZPZZZ", AArch64fmls_m1>;
@@ -4151,7 +4160,7 @@ defm BFMINNM_ZPZZ : sve2p1_bf_bin_pred_zds<AArch64fminnm_p>;
defm BFMUL_ZZZI : sve2p1_fp_bfmul_by_indexed_elem<"bfmul", int_aarch64_sve_fmul_lane>;
defm BFCLAMP_ZZZ : sve2p1_bfclamp<"bfclamp", AArch64fclamp>;
-} // End HasSVE2orSME2, HasB16B16
+} // End HasSVE2orSME2, HasSVEB16B16
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 5e17ed4..8a93b7f 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -875,6 +875,7 @@ public:
if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
ELFRefKind == AArch64MCExpr::VK_LO12 ||
ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
@@ -986,19 +987,20 @@ public:
int64_t Addend;
if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
DarwinRefKind, Addend)) {
- return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
- || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
- || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
- || ELFRefKind == AArch64MCExpr::VK_LO12
- || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
- || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
- || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
- || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
- || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
- || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
- || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
- || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
- || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
+ return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
+ (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
+ ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
}
// If it's a constant, it should be a real immediate in range.
@@ -3250,6 +3252,7 @@ ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
+ ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
@@ -3674,6 +3677,7 @@ static const struct Extension {
{"rcpc", {AArch64::FeatureRCPC}},
{"rng", {AArch64::FeatureRandGen}},
{"sve", {AArch64::FeatureSVE}},
+ {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
{"sve2", {AArch64::FeatureSVE2}},
{"sve2-aes", {AArch64::FeatureSVE2AES}},
{"sve2-sm4", {AArch64::FeatureSVE2SM4}},
@@ -4334,6 +4338,8 @@ bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
.Case("got", AArch64MCExpr::VK_GOT_PAGE)
.Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
.Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
+ .Case("got_auth", AArch64MCExpr::VK_GOT_AUTH_PAGE)
+ .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
.Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
.Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
.Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
@@ -5708,6 +5714,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
// Only allow these with ADDXri/ADDWri
if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index e9e6b6c..ef8fcc0 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2845,7 +2845,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
}
if (OpFlags & AArch64II::MO_GOT) {
- I.setDesc(TII.get(AArch64::LOADgot));
+ I.setDesc(TII.get(MF.getInfo<AArch64FunctionInfo>()->hasELFSignedGOT()
+ ? AArch64::LOADgotAUTH
+ : AArch64::LOADgot));
I.getOperand(1).setTargetFlags(OpFlags);
} else if (TM.getCodeModel() == CodeModel::Large &&
!TM.isPositionIndependent()) {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 4a1977b..afea8bd 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -288,10 +288,16 @@ bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
MachineRegisterInfo &MRI,
ShuffleVectorPseudo &MatchInfo) {
assert(Lane >= 0 && "Expected positive lane?");
+ int NumElements = MRI.getType(MI.getOperand(1).getReg()).getNumElements();
// Test if the LHS is a BUILD_VECTOR. If it is, then we can just reference the
// lane's definition directly.
- auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
- MI.getOperand(1).getReg(), MRI);
+ auto *BuildVecMI =
+ getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
+ MI.getOperand(Lane < NumElements ? 1 : 2).getReg(), MRI);
+ // If Lane >= NumElements then it is point to RHS, just check from RHS
+ if (NumElements <= Lane)
+ Lane -= NumElements;
+
if (!BuildVecMI)
return false;
Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index b4c5cde..72671b0 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -167,6 +167,15 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
}
if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC)
return R_CLS(ADR_GOT_PAGE);
+ if (SymLoc == AArch64MCExpr::VK_GOT_AUTH && !IsNC) {
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "ILP32 ADRP AUTH relocation not supported "
+ "(LP64 eqv: AUTH_ADR_GOT_PAGE)");
+ return ELF::R_AARCH64_NONE;
+ }
+ return ELF::R_AARCH64_AUTH_ADR_GOT_PAGE;
+ }
if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
return R_CLS(TLSIE_ADR_GOTTPREL_PAGE21);
if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
@@ -237,6 +246,15 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
return R_CLS(TLSLE_ADD_TPREL_LO12);
if (RefKind == AArch64MCExpr::VK_TLSDESC_LO12)
return R_CLS(TLSDESC_ADD_LO12);
+ if (RefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 && IsNC) {
+ if (IsILP32) {
+ Ctx.reportError(Fixup.getLoc(),
+ "ILP32 ADD AUTH relocation not supported "
+ "(LP64 eqv: AUTH_GOT_ADD_LO12_NC)");
+ return ELF::R_AARCH64_NONE;
+ }
+ return ELF::R_AARCH64_AUTH_GOT_ADD_LO12_NC;
+ }
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(ADD_ABS_LO12_NC);
@@ -329,17 +347,23 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
case AArch64::fixup_aarch64_ldst_imm12_scale8:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST64_ABS_LO12_NC);
- if (SymLoc == AArch64MCExpr::VK_GOT && IsNC) {
+ if ((SymLoc == AArch64MCExpr::VK_GOT ||
+ SymLoc == AArch64MCExpr::VK_GOT_AUTH) &&
+ IsNC) {
AArch64MCExpr::VariantKind AddressLoc =
AArch64MCExpr::getAddressFrag(RefKind);
+ bool IsAuth = (SymLoc == AArch64MCExpr::VK_GOT_AUTH);
if (!IsILP32) {
if (AddressLoc == AArch64MCExpr::VK_LO15)
return ELF::R_AARCH64_LD64_GOTPAGE_LO15;
- return ELF::R_AARCH64_LD64_GOT_LO12_NC;
+ return (IsAuth ? ELF::R_AARCH64_AUTH_LD64_GOT_LO12_NC
+ : ELF::R_AARCH64_LD64_GOT_LO12_NC);
}
- Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
- "relocation not supported (LP64 eqv: "
- "LD64_GOT_LO12_NC)");
+ Ctx.reportError(Fixup.getLoc(),
+ Twine("ILP32 64-bit load/store "
+ "relocation not supported (LP64 eqv: ") +
+ (IsAuth ? "AUTH_GOT_LO12_NC" : "LD64_GOT_LO12_NC") +
+ Twine(')'));
return ELF::R_AARCH64_NONE;
}
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index fb8eb9f..3430b90 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -30,6 +30,7 @@ const AArch64MCExpr *AArch64MCExpr::create(const MCExpr *Expr, VariantKind Kind,
}
StringRef AArch64MCExpr::getVariantKindName() const {
+ // clang-format off
switch (static_cast<uint32_t>(getKind())) {
case VK_CALL: return "";
case VK_LO12: return ":lo12:";
@@ -82,9 +83,13 @@ StringRef AArch64MCExpr::getVariantKindName() const {
case VK_TLSDESC_PAGE: return ":tlsdesc:";
case VK_SECREL_LO12: return ":secrel_lo12:";
case VK_SECREL_HI12: return ":secrel_hi12:";
+ case VK_GOT_AUTH: return ":got_auth:";
+ case VK_GOT_AUTH_PAGE: return ":got_auth:";
+ case VK_GOT_AUTH_LO12: return ":got_auth_lo12:";
default:
llvm_unreachable("Invalid ELF symbol kind");
}
+ // clang-format on
}
void AArch64MCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
index cf3a90f..6999927 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
@@ -24,6 +24,7 @@ namespace llvm {
class AArch64MCExpr : public MCTargetExpr {
public:
enum VariantKind {
+ // clang-format off
// Symbol locations specifying (roughly speaking) what calculation should be
// performed to construct the final address for the relocated
// symbol. E.g. direct, via the GOT, ...
@@ -38,6 +39,7 @@ public:
VK_SECREL = 0x009,
VK_AUTH = 0x00a,
VK_AUTHADDR = 0x00b,
+ VK_GOT_AUTH = 0x00c,
VK_SymLocBits = 0x00f,
// Variants specifying which part of the final address calculation is
@@ -88,6 +90,8 @@ public:
VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC,
VK_GOT_PAGE = VK_GOT | VK_PAGE,
VK_GOT_PAGE_LO15 = VK_GOT | VK_LO15 | VK_NC,
+ VK_GOT_AUTH_LO12 = VK_GOT_AUTH | VK_PAGEOFF | VK_NC,
+ VK_GOT_AUTH_PAGE = VK_GOT_AUTH | VK_PAGE,
VK_DTPREL_G2 = VK_DTPREL | VK_G2,
VK_DTPREL_G1 = VK_DTPREL | VK_G1,
VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC,
@@ -114,6 +118,7 @@ public:
VK_SECREL_HI12 = VK_SECREL | VK_HI12,
VK_INVALID = 0xfff
+ // clang-format on
};
private:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index de1f342..39c5214 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -1038,7 +1038,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) {
&AAPotentialValues::ID, &AAAMDFlatWorkGroupSize::ID,
&AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID, &AACallEdges::ID,
&AAPointerInfo::ID, &AAPotentialConstantValues::ID,
- &AAUnderlyingObjects::ID});
+ &AAUnderlyingObjects::ID, &AAAddressSpace::ID});
AttributorConfig AC(CGUpdater);
AC.Allowed = &Allowed;
@@ -1064,6 +1064,17 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) {
} else if (CC == CallingConv::AMDGPU_KERNEL) {
addPreloadKernArgHint(F, TM);
}
+
+ for (auto &I : instructions(F)) {
+ if (auto *LI = dyn_cast<LoadInst>(&I)) {
+ A.getOrCreateAAFor<AAAddressSpace>(
+ IRPosition::value(*LI->getPointerOperand()));
+ }
+ if (auto *SI = dyn_cast<StoreInst>(&I)) {
+ A.getOrCreateAAFor<AAAddressSpace>(
+ IRPosition::value(*SI->getPointerOperand()));
+ }
+ }
}
ChangeStatus Change = A.run();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 73f3921..f78699f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1372,8 +1372,8 @@ bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {
MachineInstrBuilder SelectedMI;
MachineOperand &LHS = I.getOperand(2);
MachineOperand &RHS = I.getOperand(3);
- auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS);
- auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS);
+ auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS.getReg());
+ auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS.getReg());
Register Src0Reg =
copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true);
Register Src1Reg =
@@ -2467,14 +2467,48 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
return false;
}
+static Register stripCopy(Register Reg, MachineRegisterInfo &MRI) {
+ return getDefSrcRegIgnoringCopies(Reg, MRI)->Reg;
+}
+
+static Register stripBitCast(Register Reg, MachineRegisterInfo &MRI) {
+ Register BitcastSrc;
+ if (mi_match(Reg, MRI, m_GBitcast(m_Reg(BitcastSrc))))
+ Reg = BitcastSrc;
+ return Reg;
+}
+
static bool isExtractHiElt(MachineRegisterInfo &MRI, Register In,
Register &Out) {
+ Register Trunc;
+ if (!mi_match(In, MRI, m_GTrunc(m_Reg(Trunc))))
+ return false;
+
Register LShlSrc;
- if (mi_match(In, MRI,
- m_GTrunc(m_GLShr(m_Reg(LShlSrc), m_SpecificICst(16))))) {
- Out = LShlSrc;
+ Register Cst;
+ if (mi_match(Trunc, MRI, m_GLShr(m_Reg(LShlSrc), m_Reg(Cst)))) {
+ Cst = stripCopy(Cst, MRI);
+ if (mi_match(Cst, MRI, m_SpecificICst(16))) {
+ Out = stripBitCast(LShlSrc, MRI);
+ return true;
+ }
+ }
+
+ MachineInstr *Shuffle = MRI.getVRegDef(Trunc);
+ if (Shuffle->getOpcode() != AMDGPU::G_SHUFFLE_VECTOR)
+ return false;
+
+ assert(MRI.getType(Shuffle->getOperand(0).getReg()) ==
+ LLT::fixed_vector(2, 16));
+
+ ArrayRef<int> Mask = Shuffle->getOperand(3).getShuffleMask();
+ assert(Mask.size() == 2);
+
+ if (Mask[0] == 1 && Mask[1] <= 1) {
+ Out = Shuffle->getOperand(0).getReg();
return true;
}
+
return false;
}
@@ -3550,11 +3584,8 @@ AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
}
-std::pair<Register, unsigned>
-AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
- bool IsCanonicalizing,
- bool AllowAbs, bool OpSel) const {
- Register Src = Root.getReg();
+std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
+ Register Src, bool IsCanonicalizing, bool AllowAbs, bool OpSel) const {
unsigned Mods = 0;
MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
@@ -3617,7 +3648,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
return {{
[=](MachineInstrBuilder &MIB) {
@@ -3633,7 +3664,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(),
/*IsCanonicalizing=*/true,
/*AllowAbs=*/false);
@@ -3660,7 +3691,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
return {{
[=](MachineInstrBuilder &MIB) {
@@ -3675,7 +3706,8 @@ AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/false);
+ std::tie(Src, Mods) =
+ selectVOP3ModsImpl(Root.getReg(), /*IsCanonicalizing=*/false);
return {{
[=](MachineInstrBuilder &MIB) {
@@ -3689,8 +3721,9 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/true,
- /*AllowAbs=*/false);
+ std::tie(Src, Mods) =
+ selectVOP3ModsImpl(Root.getReg(), /*IsCanonicalizing=*/true,
+ /*AllowAbs=*/false);
return {{
[=](MachineInstrBuilder &MIB) {
@@ -4016,7 +4049,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
// FIXME: Handle op_sel
return {{
@@ -4029,7 +4062,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(),
/*IsCanonicalizing=*/true,
/*AllowAbs=*/false,
/*OpSel=*/false);
@@ -4047,7 +4080,7 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(),
/*IsCanonicalizing=*/true,
/*AllowAbs=*/false,
/*OpSel=*/true);
@@ -5229,59 +5262,6 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
}
-// Variant of stripBitCast that returns the instruction instead of a
-// MachineOperand.
-static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) {
- if (MI->getOpcode() == AMDGPU::G_BITCAST)
- return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
- return MI;
-}
-
-// Figure out if this is really an extract of the high 16-bits of a dword,
-// returns nullptr if it isn't.
-static MachineInstr *isExtractHiElt(MachineInstr *Inst,
- MachineRegisterInfo &MRI) {
- Inst = stripBitCast(Inst, MRI);
-
- if (Inst->getOpcode() != AMDGPU::G_TRUNC)
- return nullptr;
-
- MachineInstr *TruncOp =
- getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI);
- TruncOp = stripBitCast(TruncOp, MRI);
-
- // G_LSHR x, (G_CONSTANT i32 16)
- if (TruncOp->getOpcode() == AMDGPU::G_LSHR) {
- auto SrlAmount = getIConstantVRegValWithLookThrough(
- TruncOp->getOperand(2).getReg(), MRI);
- if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) {
- MachineInstr *SrlOp =
- getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
- return stripBitCast(SrlOp, MRI);
- }
- }
-
- // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0)
- // 1, 0 swaps the low/high 16 bits.
- // 1, 1 sets the high 16 bits to be the same as the low 16.
- // in any case, it selects the high elts.
- if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) {
- assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
- LLT::fixed_vector(2, 16));
-
- ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask();
- assert(Mask.size() == 2);
-
- if (Mask[0] == 1 && Mask[1] <= 1) {
- MachineInstr *LHS =
- getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
- return stripBitCast(LHS, MRI);
- }
- }
-
- return nullptr;
-}
-
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
bool &Matched) const {
@@ -5289,37 +5269,34 @@ AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
Register Src;
unsigned Mods;
- std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
-
- MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
- if (MI->getOpcode() == AMDGPU::G_FPEXT) {
- MachineOperand *MO = &MI->getOperand(1);
- Src = MO->getReg();
- MI = getDefIgnoringCopies(Src, *MRI);
+ std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
+ if (mi_match(Src, *MRI, m_GFPExt(m_Reg(Src)))) {
assert(MRI->getType(Src) == LLT::scalar(16));
- // See through bitcasts.
- // FIXME: Would be nice to use stripBitCast here.
- if (MI->getOpcode() == AMDGPU::G_BITCAST) {
- MO = &MI->getOperand(1);
- Src = MO->getReg();
- MI = getDefIgnoringCopies(Src, *MRI);
- }
+ // Only change Src if src modifier could be gained. In such cases new Src
+ // could be sgpr but this does not violate constant bus restriction for
+ // instruction that is being selected.
+ // Note: Src is not changed when there is only a simple sgpr to vgpr copy
+ // since this could violate constant bus restriction.
+ Register PeekSrc = stripCopy(Src, *MRI);
const auto CheckAbsNeg = [&]() {
// Be careful about folding modifiers if we already have an abs. fneg is
// applied last, so we don't want to apply an earlier fneg.
if ((Mods & SISrcMods::ABS) == 0) {
unsigned ModsTmp;
- std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO);
- MI = getDefIgnoringCopies(Src, *MRI);
+ std::tie(PeekSrc, ModsTmp) = selectVOP3ModsImpl(PeekSrc);
- if ((ModsTmp & SISrcMods::NEG) != 0)
+ if ((ModsTmp & SISrcMods::NEG) != 0) {
Mods ^= SISrcMods::NEG;
+ Src = PeekSrc;
+ }
- if ((ModsTmp & SISrcMods::ABS) != 0)
+ if ((ModsTmp & SISrcMods::ABS) != 0) {
Mods |= SISrcMods::ABS;
+ Src = PeekSrc;
+ }
}
};
@@ -5332,12 +5309,9 @@ AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
Mods |= SISrcMods::OP_SEL_1;
- if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) {
+ if (isExtractHiElt(*MRI, PeekSrc, PeekSrc)) {
+ Src = PeekSrc;
Mods |= SISrcMods::OP_SEL_0;
- MI = ExtractHiEltMI;
- MO = &MI->getOperand(0);
- Src = MO->getReg();
-
CheckAbsNeg();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 7fff7d2..69806b2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -150,7 +150,7 @@ private:
bool selectSBarrierSignalIsfirst(MachineInstr &I, Intrinsic::ID IID) const;
bool selectSBarrierLeave(MachineInstr &I) const;
- std::pair<Register, unsigned> selectVOP3ModsImpl(MachineOperand &Root,
+ std::pair<Register, unsigned> selectVOP3ModsImpl(Register Src,
bool IsCanonicalizing = true,
bool AllowAbs = true,
bool OpSel = false) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9a6ba5a..17067dd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3739,17 +3739,28 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (MI.isCopy() || MI.getOpcode() == AMDGPU::G_FREEZE) {
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+
// The default logic bothers to analyze impossible alternative mappings. We
// want the most straightforward mapping, so just directly handle this.
- const RegisterBank *DstBank = getRegBank(MI.getOperand(0).getReg(), MRI,
- *TRI);
- const RegisterBank *SrcBank = getRegBank(MI.getOperand(1).getReg(), MRI,
- *TRI);
+ const RegisterBank *DstBank = getRegBank(DstReg, MRI, *TRI);
+ const RegisterBank *SrcBank = getRegBank(SrcReg, MRI, *TRI);
assert(SrcBank && "src bank should have been assigned already");
+
+ // For COPY between a physical reg and an s1, there is no type associated so
+ // we need to take the virtual register's type as a hint on how to interpret
+ // s1 values.
+ if (!SrcReg.isVirtual() && !DstBank &&
+ MRI.getType(DstReg) == LLT::scalar(1))
+ DstBank = &AMDGPU::VCCRegBank;
+ else if (!DstReg.isVirtual() && MRI.getType(SrcReg) == LLT::scalar(1))
+ DstBank = &AMDGPU::VCCRegBank;
+
if (!DstBank)
DstBank = SrcBank;
- unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
+ unsigned Size = getSizeInBits(DstReg, MRI, *TRI);
if (MI.getOpcode() != AMDGPU::G_FREEZE &&
cannotCopy(*DstBank, *SrcBank, TypeSize::getFixed(Size)))
return getInvalidInstructionMapping();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
index 80ca308..5c4d2b8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -381,3 +381,7 @@ def : AlwaysUniform<int_amdgcn_if_break>;
def : AlwaysUniform<int_amdgcn_workgroup_id_x>;
def : AlwaysUniform<int_amdgcn_workgroup_id_y>;
def : AlwaysUniform<int_amdgcn_workgroup_id_z>;
+def : AlwaysUniform<int_amdgcn_s_getpc>;
+def : AlwaysUniform<int_amdgcn_s_getreg>;
+def : AlwaysUniform<int_amdgcn_s_memrealtime>;
+def : AlwaysUniform<int_amdgcn_s_memtime>;
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 26a839a..c8b594f 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1618,6 +1618,14 @@ public:
ParseStatus parseTH(OperandVector &Operands, int64_t &TH);
ParseStatus parseStringWithPrefix(StringRef Prefix, StringRef &Value,
SMLoc &StringLoc);
+ ParseStatus parseStringOrIntWithPrefix(OperandVector &Operands,
+ StringRef Name,
+ ArrayRef<const char *> Ids,
+ int64_t &IntVal);
+ ParseStatus parseStringOrIntWithPrefix(OperandVector &Operands,
+ StringRef Name,
+ ArrayRef<const char *> Ids,
+ AMDGPUOperand::ImmTy Type);
bool isModifier();
bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
@@ -6633,27 +6641,17 @@ ParseStatus AMDGPUAsmParser::parseCPol(OperandVector &Operands) {
ParseStatus AMDGPUAsmParser::parseScope(OperandVector &Operands,
int64_t &Scope) {
- Scope = AMDGPU::CPol::SCOPE_CU; // default;
+ static const unsigned Scopes[] = {CPol::SCOPE_CU, CPol::SCOPE_SE,
+ CPol::SCOPE_DEV, CPol::SCOPE_SYS};
- StringRef Value;
- SMLoc StringLoc;
- ParseStatus Res;
-
- Res = parseStringWithPrefix("scope", Value, StringLoc);
- if (!Res.isSuccess())
- return Res;
-
- Scope = StringSwitch<int64_t>(Value)
- .Case("SCOPE_CU", AMDGPU::CPol::SCOPE_CU)
- .Case("SCOPE_SE", AMDGPU::CPol::SCOPE_SE)
- .Case("SCOPE_DEV", AMDGPU::CPol::SCOPE_DEV)
- .Case("SCOPE_SYS", AMDGPU::CPol::SCOPE_SYS)
- .Default(0xffffffff);
+ ParseStatus Res = parseStringOrIntWithPrefix(
+ Operands, "scope", {"SCOPE_CU", "SCOPE_SE", "SCOPE_DEV", "SCOPE_SYS"},
+ Scope);
- if (Scope == 0xffffffff)
- return Error(StringLoc, "invalid scope value");
+ if (Res.isSuccess())
+ Scope = Scopes[Scope];
- return ParseStatus::Success;
+ return Res;
}
ParseStatus AMDGPUAsmParser::parseTH(OperandVector &Operands, int64_t &TH) {
@@ -6742,6 +6740,44 @@ ParseStatus AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix,
: ParseStatus::Failure;
}
+ParseStatus AMDGPUAsmParser::parseStringOrIntWithPrefix(
+ OperandVector &Operands, StringRef Name, ArrayRef<const char *> Ids,
+ int64_t &IntVal) {
+ if (!trySkipId(Name, AsmToken::Colon))
+ return ParseStatus::NoMatch;
+
+ SMLoc StringLoc = getLoc();
+
+ StringRef Value;
+ if (isToken(AsmToken::Identifier)) {
+ Value = getTokenStr();
+ lex();
+
+ for (IntVal = 0; IntVal < (int64_t)Ids.size(); ++IntVal)
+ if (Value == Ids[IntVal])
+ break;
+ } else if (!parseExpr(IntVal))
+ return ParseStatus::Failure;
+
+ if (IntVal < 0 || IntVal >= (int64_t)Ids.size())
+ return Error(StringLoc, "invalid " + Twine(Name) + " value");
+
+ return ParseStatus::Success;
+}
+
+ParseStatus AMDGPUAsmParser::parseStringOrIntWithPrefix(
+ OperandVector &Operands, StringRef Name, ArrayRef<const char *> Ids,
+ AMDGPUOperand::ImmTy Type) {
+ SMLoc S = getLoc();
+ int64_t IntVal;
+
+ ParseStatus Res = parseStringOrIntWithPrefix(Operands, Name, Ids, IntVal);
+ if (Res.isSuccess())
+ Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S, Type));
+
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// MTBUF format
//===----------------------------------------------------------------------===//
@@ -9396,57 +9432,16 @@ void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands, bool I
ParseStatus AMDGPUAsmParser::parseSDWASel(OperandVector &Operands,
StringRef Prefix,
AMDGPUOperand::ImmTy Type) {
- using namespace llvm::AMDGPU::SDWA;
-
- SMLoc S = getLoc();
- StringRef Value;
-
- SMLoc StringLoc;
- ParseStatus Res = parseStringWithPrefix(Prefix, Value, StringLoc);
- if (!Res.isSuccess())
- return Res;
-
- int64_t Int;
- Int = StringSwitch<int64_t>(Value)
- .Case("BYTE_0", SdwaSel::BYTE_0)
- .Case("BYTE_1", SdwaSel::BYTE_1)
- .Case("BYTE_2", SdwaSel::BYTE_2)
- .Case("BYTE_3", SdwaSel::BYTE_3)
- .Case("WORD_0", SdwaSel::WORD_0)
- .Case("WORD_1", SdwaSel::WORD_1)
- .Case("DWORD", SdwaSel::DWORD)
- .Default(0xffffffff);
-
- if (Int == 0xffffffff)
- return Error(StringLoc, "invalid " + Twine(Prefix) + " value");
-
- Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
- return ParseStatus::Success;
+ return parseStringOrIntWithPrefix(
+ Operands, Prefix,
+ {"BYTE_0", "BYTE_1", "BYTE_2", "BYTE_3", "WORD_0", "WORD_1", "DWORD"},
+ Type);
}
ParseStatus AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
- using namespace llvm::AMDGPU::SDWA;
-
- SMLoc S = getLoc();
- StringRef Value;
-
- SMLoc StringLoc;
- ParseStatus Res = parseStringWithPrefix("dst_unused", Value, StringLoc);
- if (!Res.isSuccess())
- return Res;
-
- int64_t Int;
- Int = StringSwitch<int64_t>(Value)
- .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
- .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
- .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
- .Default(0xffffffff);
-
- if (Int == 0xffffffff)
- return Error(StringLoc, "invalid dst_unused value");
-
- Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySDWADstUnused));
- return ParseStatus::Success;
+ return parseStringOrIntWithPrefix(
+ Operands, "dst_unused", {"UNUSED_PAD", "UNUSED_SEXT", "UNUSED_PRESERVE"},
+ AMDGPUOperand::ImmTySDWADstUnused);
}
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index ae537b1..b39fbdc 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -352,6 +352,8 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
return 1;
case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec:
case AMDGPU::S_LOAD_DWORDX2_IMM:
case AMDGPU::S_LOAD_DWORDX2_IMM_ec:
case AMDGPU::GLOBAL_LOAD_DWORDX2:
@@ -363,6 +365,8 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
return 2;
case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM_ec:
case AMDGPU::S_LOAD_DWORDX3_IMM:
case AMDGPU::S_LOAD_DWORDX3_IMM_ec:
case AMDGPU::GLOBAL_LOAD_DWORDX3:
@@ -374,6 +378,8 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
return 3;
case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec:
case AMDGPU::S_LOAD_DWORDX4_IMM:
case AMDGPU::S_LOAD_DWORDX4_IMM_ec:
case AMDGPU::GLOBAL_LOAD_DWORDX4:
@@ -385,6 +391,8 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
return 4;
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM_ec:
case AMDGPU::S_LOAD_DWORDX8_IMM:
case AMDGPU::S_LOAD_DWORDX8_IMM_ec:
return 8;
@@ -499,12 +507,20 @@ static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM_ec:
return S_BUFFER_LOAD_IMM;
case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM_ec:
return S_BUFFER_LOAD_SGPR_IMM;
case AMDGPU::S_LOAD_DWORD_IMM:
case AMDGPU::S_LOAD_DWORDX2_IMM:
@@ -587,12 +603,20 @@ static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM_ec:
return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM_ec:
return AMDGPU::S_BUFFER_LOAD_DWORD_SGPR_IMM;
case AMDGPU::S_LOAD_DWORD_IMM:
case AMDGPU::S_LOAD_DWORDX2_IMM:
@@ -703,6 +727,10 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM_ec:
Result.SOffset = true;
[[fallthrough]];
case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
@@ -710,6 +738,10 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM_ec:
+ case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM_ec:
case AMDGPU::S_LOAD_DWORD_IMM:
case AMDGPU::S_LOAD_DWORDX2_IMM:
case AMDGPU::S_LOAD_DWORDX3_IMM:
@@ -1679,6 +1711,14 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeFlatStorePair(
return New;
}
+static bool needsConstrainedOpcode(const GCNSubtarget &STM,
+ ArrayRef<MachineMemOperand *> MMOs,
+ unsigned Width) {
+ // Conservatively returns true if not found the MMO.
+ return STM.isXNACKEnabled() &&
+ (MMOs.size() != 1 || MMOs[0]->getAlign().value() < Width * 4);
+}
+
unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
const CombineInfo &Paired) {
const unsigned Width = CI.Width + Paired.Width;
@@ -1696,38 +1736,55 @@ unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
case UNKNOWN:
llvm_unreachable("Unknown instruction class");
- case S_BUFFER_LOAD_IMM:
+ case S_BUFFER_LOAD_IMM: {
+ // If XNACK is enabled, use the constrained opcodes when the first load is
+ // under-aligned.
+ bool NeedsConstrainedOpc =
+ needsConstrainedOpcode(*STM, CI.I->memoperands(), Width);
switch (Width) {
default:
return 0;
case 2:
- return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
case 3:
- return AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX3_IMM;
case 4:
- return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
case 8:
- return AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM;
}
- case S_BUFFER_LOAD_SGPR_IMM:
+ }
+ case S_BUFFER_LOAD_SGPR_IMM: {
+ // If XNACK is enabled, use the constrained opcodes when the first load is
+ // under-aligned.
+ bool NeedsConstrainedOpc =
+ needsConstrainedOpcode(*STM, CI.I->memoperands(), Width);
switch (Width) {
default:
return 0;
case 2:
- return AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR_IMM;
case 3:
- return AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX3_SGPR_IMM;
case 4:
- return AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR_IMM;
case 8:
- return AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM;
+ return NeedsConstrainedOpc ? AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM_ec
+ : AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR_IMM;
}
+ }
case S_LOAD_IMM: {
// If XNACK is enabled, use the constrained opcodes when the first load is
// under-aligned.
- const MachineMemOperand *MMO = *CI.I->memoperands_begin();
bool NeedsConstrainedOpc =
- STM->isXNACKEnabled() && MMO->getAlign().value() < Width * 4;
+ needsConstrainedOpcode(*STM, CI.I->memoperands(), Width);
switch (Width) {
default:
return 0;
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index fb33308..1312b44 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -414,6 +414,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
MF->RenumberBlocks();
+ DT->updateBlockNumbers();
// Try to reorder and otherwise adjust the block layout to make good use
// of the TB[BH] instructions.
@@ -425,6 +426,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
T2JumpTables.clear();
// Blocks may have shifted around. Keep the numbering up to date.
MF->RenumberBlocks();
+ DT->updateBlockNumbers();
}
// Align any non-fallthrough blocks
@@ -670,8 +672,10 @@ void ARMConstantIslands::doInitialJumpTablePlacement(
}
// If we did anything then we need to renumber the subsequent blocks.
- if (LastCorrectlyNumberedBB)
+ if (LastCorrectlyNumberedBB) {
MF->RenumberBlocks(LastCorrectlyNumberedBB);
+ DT->updateBlockNumbers();
+ }
}
/// BBHasFallthrough - Return true if the specified basic block can fallthrough
@@ -972,6 +976,7 @@ static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
// Renumber the MBB's to keep them consecutive.
NewBB->getParent()->RenumberBlocks(NewBB);
+ DT->updateBlockNumbers();
// Insert an entry into BBInfo to align it properly with the (newly
// renumbered) block numbers.
@@ -1034,6 +1039,7 @@ MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
// This is almost the same as updateForInsertedWaterBlock, except that
// the Water goes after OrigBB, not NewBB.
MF->RenumberBlocks(NewBB);
+ DT->updateBlockNumbers();
// Insert an entry into BBInfo to align it properly with the (newly
// renumbered) block numbers.
@@ -2485,6 +2491,7 @@ MachineBasicBlock *ARMConstantIslands::adjustJTTargetBlockForward(
BB->updateTerminator(OldNext != MF->end() ? &*OldNext : nullptr);
// Update numbering to account for the block being moved.
MF->RenumberBlocks();
+ DT->updateBlockNumbers();
++NumJTMoved;
return nullptr;
}
@@ -2513,6 +2520,7 @@ MachineBasicBlock *ARMConstantIslands::adjustJTTargetBlockForward(
// Update internal data structures to account for the newly inserted MBB.
MF->RenumberBlocks(NewBB);
+ DT->updateBlockNumbers();
// Update the CFG.
NewBB->addSuccessor(BB);
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index e133dbe..61635bd 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -5849,6 +5849,7 @@ def t2AUT : PACBTIHintSpaceUseInst<"aut", 0b00101101> {
def ARMt2CallBTI : SDNode<"ARMISD::t2CALL_BTI", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
+let Defs = [LR], Uses = [SP] in
def t2CALL_BTI : PseudoInst<(outs), (ins pred:$p, thumb_bl_target:$func),
IIC_Br, [(ARMt2CallBTI tglobaladdr:$func)]>,
Requires<[IsThumb2]>, Sched<[WriteBrL]>;
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 994b43f..4e4a19d 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -587,6 +587,14 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
return 0;
return 0xffffff & ((Value - 8) >> 2);
case ARM::fixup_t2_uncondbranch: {
+ if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
+ Value != 4) {
+ // MSVC link.exe and lld do not support this relocation type
+ // with a non-zero offset. ("Value" is offset by 4 at this point.)
+ Ctx.reportError(Fixup.getLoc(),
+ "cannot perform a PC-relative fixup with a non-zero "
+ "symbol offset");
+ }
Value = Value - 4;
if (!isInt<25>(Value)) {
Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
@@ -637,6 +645,14 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
return 0;
}
+ if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
+ Value != 4) {
+ // MSVC link.exe and lld do not support this relocation type
+ // with a non-zero offset. ("Value" is offset by 4 at this point.)
+ Ctx.reportError(Fixup.getLoc(),
+ "cannot perform a PC-relative fixup with a non-zero "
+ "symbol offset");
+ }
// The value doesn't encode the low bit (always zero) and is offset by
// four. The 32-bit immediate value is encoded as
@@ -666,6 +682,14 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
Endian == llvm::endianness::little);
}
case ARM::fixup_arm_thumb_blx: {
+ if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
+ Value != 4) {
+ // MSVC link.exe and lld do not support this relocation type
+ // with a non-zero offset. ("Value" is offset by 4 at this point.)
+ Ctx.reportError(Fixup.getLoc(),
+ "cannot perform a PC-relative fixup with a non-zero "
+ "symbol offset");
+ }
// The value doesn't encode the low two bits (always zero) and is offset by
// four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
// imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index 50a59ce..ddc62b5 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -41,8 +41,6 @@ namespace {
bool needsRelocateWithSymbol(const MCValue &Val, const MCSymbol &Sym,
unsigned Type) const override;
-
- void addTargetSectionFlags(MCContext &Ctx, MCSectionELF &Sec) override;
};
} // end anonymous namespace
@@ -319,25 +317,6 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
}
}
-void ARMELFObjectWriter::addTargetSectionFlags(MCContext &Ctx,
- MCSectionELF &Sec) {
- // The mix of execute-only and non-execute-only at link time is
- // non-execute-only. To avoid the empty implicitly created .text
- // section from making the whole .text section non-execute-only, we
- // mark it execute-only if it is empty and there is at least one
- // execute-only section in the object.
- MCSectionELF *TextSection =
- static_cast<MCSectionELF *>(Ctx.getObjectFileInfo()->getTextSection());
- bool IsExecOnly = Sec.getFlags() & ELF::SHF_ARM_PURECODE;
- if (IsExecOnly && !TextSection->hasInstructions()) {
- for (auto &F : *TextSection)
- if (auto *DF = dyn_cast<MCDataFragment>(&F))
- if (!DF->getContents().empty())
- return;
- TextSection->setFlags(TextSection->getFlags() | ELF::SHF_ARM_PURECODE);
- }
-}
-
std::unique_ptr<MCObjectTargetWriter>
llvm::createARMELFObjectWriter(uint8_t OSABI) {
return std::make_unique<ARMELFObjectWriter>(OSABI);
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 9df752f..59f2966 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -34,6 +34,7 @@
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
@@ -1113,6 +1114,25 @@ void ARMTargetELFStreamer::reset() { AttributeSection = nullptr; }
void ARMTargetELFStreamer::finish() {
ARMTargetStreamer::finish();
finishAttributeSection();
+
+ // The mix of execute-only and non-execute-only at link time is
+ // non-execute-only. To avoid the empty implicitly created .text
+ // section from making the whole .text section non-execute-only, we
+ // mark it execute-only if it is empty and there is at least one
+ // execute-only section in the object.
+ MCContext &Ctx = getStreamer().getContext();
+ auto &Asm = getStreamer().getAssembler();
+ if (any_of(Asm, [](const MCSection &Sec) {
+ return cast<MCSectionELF>(Sec).getFlags() & ELF::SHF_ARM_PURECODE;
+ })) {
+ auto *Text =
+ static_cast<MCSectionELF *>(Ctx.getObjectFileInfo()->getTextSection());
+ for (auto &F : *Text)
+ if (auto *DF = dyn_cast<MCDataFragment>(&F))
+ if (!DF->getContents().empty())
+ return;
+ Text->setFlags(Text->getFlags() | ELF::SHF_ARM_PURECODE);
+ }
}
void ARMELFStreamer::reset() {
diff --git a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
index 97bdd4c..d7f4d4b 100644
--- a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
+++ b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
@@ -28,7 +28,6 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -217,11 +216,6 @@ public:
bool runOnMachineFunction(MachineFunction &F) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<MachineDominatorTreeWrapperPass>();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
MachineFunctionProperties getRequiredProperties() const override {
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::NoVRegs);
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp
index e9d95c6..a223028 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp
@@ -249,7 +249,8 @@ void HexagonCopyHoisting::moveCopyInstr(MachineBasicBlock *DestBB,
DestBB->splice(FirstTI, MI->getParent(), MI);
addMItoCopyList(MI);
- for (auto I = ++(DestBB->succ_begin()), E = DestBB->succ_end(); I != E; ++I) {
+ for (auto I = std::next(DestBB->succ_begin()), E = DestBB->succ_end(); I != E;
+ ++I) {
MachineBasicBlock *SuccBB = *I;
auto &BBCopyInst = CopyMIList[SuccBB->getNumber()];
MachineInstr *SuccMI = BBCopyInst[Key];
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index a004d64..5b568b0 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -13,12 +13,14 @@
#include "MCTargetDesc/NVPTXInstPrinter.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
+#include "NVPTXUtilities.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/FormattedStream.h"
#include <cctype>
using namespace llvm;
@@ -228,31 +230,29 @@ void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
const MCOperand &MO = MI->getOperand(OpNum);
int Imm = (int) MO.getImm();
if (!strcmp(Modifier, "sem")) {
- switch (Imm) {
- case NVPTX::PTXLdStInstCode::NotAtomic:
+ auto Ordering = NVPTX::Ordering(Imm);
+ switch (Ordering) {
+ case NVPTX::Ordering::NotAtomic:
break;
- case NVPTX::PTXLdStInstCode::Volatile:
+ case NVPTX::Ordering::Volatile:
O << ".volatile";
break;
- case NVPTX::PTXLdStInstCode::Relaxed:
+ case NVPTX::Ordering::Relaxed:
O << ".relaxed.sys";
break;
- case NVPTX::PTXLdStInstCode::Acquire:
+ case NVPTX::Ordering::Acquire:
O << ".acquire.sys";
break;
- case NVPTX::PTXLdStInstCode::Release:
+ case NVPTX::Ordering::Release:
O << ".release.sys";
break;
- case NVPTX::PTXLdStInstCode::RelaxedMMIO:
+ case NVPTX::Ordering::RelaxedMMIO:
O << ".mmio.relaxed.sys";
break;
default:
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "NVPTX LdStCode Printer does not support \"" << Imm
- << "\" sem modifier.";
- report_fatal_error(OS.str());
- break;
+ report_fatal_error(formatv(
+ "NVPTX LdStCode Printer does not support \"{}\" sem modifier.",
+ OrderingToCString(Ordering)));
}
} else if (!strcmp(Modifier, "addsp")) {
switch (Imm) {
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index 3c7167b..f6f6acb 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -16,6 +16,7 @@
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
namespace llvm {
@@ -106,15 +107,25 @@ enum LoadStore {
isStoreShift = 6
};
-namespace PTXLdStInstCode {
-enum MemorySemantic {
- NotAtomic = 0, // PTX calls these: "Weak"
- Volatile = 1,
- Relaxed = 2,
- Acquire = 3,
- Release = 4,
- RelaxedMMIO = 5
+// Extends LLVM AtomicOrdering with PTX Orderings:
+using OrderingUnderlyingType = unsigned int;
+enum Ordering : OrderingUnderlyingType {
+ NotAtomic = (OrderingUnderlyingType)
+ AtomicOrdering::NotAtomic, // PTX calls these: "Weak"
+  // Unordered = 1, // NVPTX maps LLVM Unordered to Relaxed
+ Relaxed = (OrderingUnderlyingType)AtomicOrdering::Monotonic,
+ // Consume = 3, // Unimplemented in LLVM; NVPTX would map to "Acquire"
+ Acquire = (OrderingUnderlyingType)AtomicOrdering::Acquire,
+ Release = (OrderingUnderlyingType)AtomicOrdering::Release,
+ // AcquireRelease = 6, // TODO
+ SequentiallyConsistent =
+ (OrderingUnderlyingType)AtomicOrdering::SequentiallyConsistent,
+ Volatile = SequentiallyConsistent + 1,
+ RelaxedMMIO = Volatile + 1,
+ LAST = RelaxedMMIO
};
+
+namespace PTXLdStInstCode {
enum AddressSpace {
GENERIC = 0,
GLOBAL = 1,
@@ -134,7 +145,7 @@ enum VecType {
V2 = 2,
V4 = 4
};
-}
+} // namespace PTXLdStInstCode
/// PTXCvtMode - Conversion code enumeration
namespace PTXCvtMode {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 96456ad..25c198f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -714,21 +715,28 @@ static unsigned int getCodeAddrSpace(MemSDNode *N) {
return NVPTX::PTXLdStInstCode::GENERIC;
}
-static unsigned int getCodeMemorySemantic(MemSDNode *N,
- const NVPTXSubtarget *Subtarget) {
+namespace {
+
+struct OperationOrderings {
+ NVPTX::Ordering InstructionOrdering, FenceOrdering;
+ OperationOrderings(NVPTX::Ordering IO = NVPTX::Ordering::NotAtomic,
+ NVPTX::Ordering FO = NVPTX::Ordering::NotAtomic)
+ : InstructionOrdering(IO), FenceOrdering(FO) {}
+};
+
+static OperationOrderings
+getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
AtomicOrdering Ordering = N->getSuccessOrdering();
auto CodeAddrSpace = getCodeAddrSpace(N);
bool HasMemoryOrdering = Subtarget->hasMemoryOrdering();
bool HasRelaxedMMIO = Subtarget->hasRelaxedMMIO();
- // TODO: lowering for SequentiallyConsistent Operations: for now, we error.
- // TODO: lowering for AcquireRelease Operations: for now, we error.
- //
-
// clang-format off
- // Lowering for non-SequentiallyConsistent Operations
+ // Lowering for Load/Store Operations (note: AcquireRelease Loads or Stores error).
+ // Note: uses of Relaxed in the Atomic column of this table refer
+ // to LLVM AtomicOrdering::Monotonic.
//
// | Atomic | Volatile | Statespace | PTX sm_60- | PTX sm_70+ |
// |---------|----------|--------------------|------------|------------------------------|
@@ -749,6 +757,25 @@ static unsigned int getCodeMemorySemantic(MemSDNode *N,
// | Other | Yes | Generic, Shared, | Error [2] | <atomic sem> [3] |
// | | | / Global [0] | | |
+ // Lowering of CUDA C++ SequentiallyConsistent Operations and Fences to PTX
+ // by following the ABI proven sound in:
+ // Lustig et al, A Formal Analysis of the NVIDIA PTX Memory Consistency Model, ASPLOS’19.
+ // https://dl.acm.org/doi/pdf/10.1145/3297858.3304043
+ //
+ // | CUDA C++ Atomic Operation or Atomic Fence | PTX Atomic Operation or Fence |
+ // |------------------------------------------------------|-------------------------------|
+ // | cuda::atomic_thread_fence | fence.sc.<scope>; |
+ // | (memory_order_seq_cst, cuda::thread_scope_<scope>) | |
+ // |------------------------------------------------------|-------------------------------|
+ // | cuda::atomic_load | fence.sc.<scope>; |
+ // | (memory_order_seq_cst, cuda::thread_scope_<scope>) | ld.acquire.<scope>; |
+ // |------------------------------------------------------|-------------------------------|
+ // | cuda::atomic_store | fence.sc.<scope>; |
+ // | (memory_order_seq_cst, cuda::thread_scope_<scope>) | st.release.<scope>; |
+ // |------------------------------------------------------|-------------------------------|
+ // | cuda::atomic_fetch_<op> | fence.sc.<scope>; |
+ // | (memory_order_seq_cst, cuda::thread_scope_<scope>) | atom.acq_rel.<scope>; |
+
// clang-format on
// [0]: volatile and atomics are only supported on global or shared
@@ -788,11 +815,10 @@ static unsigned int getCodeMemorySemantic(MemSDNode *N,
// - the "weak" memory instruction we are currently lowering to, and
// - some other instruction that preserves the side-effect, e.g.,
// a dead dummy volatile load.
-
if (CodeAddrSpace == NVPTX::PTXLdStInstCode::LOCAL ||
CodeAddrSpace == NVPTX::PTXLdStInstCode::CONSTANT ||
CodeAddrSpace == NVPTX::PTXLdStInstCode::PARAM) {
- return NVPTX::PTXLdStInstCode::NotAtomic;
+ return NVPTX::Ordering::NotAtomic;
}
// [2]: Atomics with Ordering different than Unordered or Relaxed are not
@@ -801,12 +827,11 @@ static unsigned int getCodeMemorySemantic(MemSDNode *N,
Ordering == AtomicOrdering::Unordered ||
Ordering == AtomicOrdering::Monotonic) &&
!HasMemoryOrdering) {
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "PTX does not support \"atomic\" for orderings different than"
- "\"NotAtomic\" or \"Monotonic\" for sm_60 or older, but order is: \""
- << toIRString(Ordering) << "\".";
- report_fatal_error(OS.str());
+ report_fatal_error(
+      formatv("PTX does not support \"atomic\" for orderings different than "
+ "\"NotAtomic\" or \"Monotonic\" for sm_60 or older, but order "
+ "is: \"{}\".",
+ toIRString(Ordering)));
}
// [3]: TODO: these should eventually use .mmio<.atomic sem>; for now we drop
@@ -820,68 +845,76 @@ static unsigned int getCodeMemorySemantic(MemSDNode *N,
(CodeAddrSpace == NVPTX::PTXLdStInstCode::GENERIC ||
CodeAddrSpace == NVPTX::PTXLdStInstCode::GLOBAL ||
CodeAddrSpace == NVPTX::PTXLdStInstCode::SHARED);
+ if (!AddrGenericOrGlobalOrShared)
+ return NVPTX::Ordering::NotAtomic;
+
bool UseRelaxedMMIO =
HasRelaxedMMIO && CodeAddrSpace == NVPTX::PTXLdStInstCode::GLOBAL;
switch (Ordering) {
case AtomicOrdering::NotAtomic:
- return N->isVolatile() && AddrGenericOrGlobalOrShared
- ? NVPTX::PTXLdStInstCode::Volatile
- : NVPTX::PTXLdStInstCode::NotAtomic;
+ return N->isVolatile() ? NVPTX::Ordering::Volatile
+ : NVPTX::Ordering::NotAtomic;
case AtomicOrdering::Unordered:
// We lower unordered in the exact same way as 'monotonic' to respect
// LLVM IR atomicity requirements.
case AtomicOrdering::Monotonic:
if (N->isVolatile())
- return UseRelaxedMMIO ? NVPTX::PTXLdStInstCode::RelaxedMMIO
- : AddrGenericOrGlobalOrShared ? NVPTX::PTXLdStInstCode::Volatile
- : NVPTX::PTXLdStInstCode::NotAtomic;
+ return UseRelaxedMMIO ? NVPTX::Ordering::RelaxedMMIO
+ : NVPTX::Ordering::Volatile;
else
- return HasMemoryOrdering ? NVPTX::PTXLdStInstCode::Relaxed
- : AddrGenericOrGlobalOrShared ? NVPTX::PTXLdStInstCode::Volatile
- : NVPTX::PTXLdStInstCode::NotAtomic;
+ return HasMemoryOrdering ? NVPTX::Ordering::Relaxed
+ : NVPTX::Ordering::Volatile;
+ // case AtomicOrdering::Consume: // If LLVM ever provides this, lower it to
+ // Acquire.
case AtomicOrdering::Acquire:
- if (!N->readMem()) {
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "PTX only supports Acquire Ordering on reads: "
- << N->getOperationName();
- N->print(OS);
- report_fatal_error(OS.str());
- }
- return AddrGenericOrGlobalOrShared ? NVPTX::PTXLdStInstCode::Acquire
- : NVPTX::PTXLdStInstCode::NotAtomic;
+ if (!N->readMem())
+ report_fatal_error(
+ formatv("PTX only supports Acquire Ordering on reads: {}",
+ N->getOperationName()));
+ return NVPTX::Ordering::Acquire;
case AtomicOrdering::Release:
- if (!N->writeMem()) {
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "PTX only supports Release Ordering on writes: "
- << N->getOperationName();
- N->print(OS);
- report_fatal_error(OS.str());
- }
- return AddrGenericOrGlobalOrShared ? NVPTX::PTXLdStInstCode::Release
- : NVPTX::PTXLdStInstCode::NotAtomic;
+ if (!N->writeMem())
+ report_fatal_error(
+ formatv("PTX only supports Release Ordering on writes: {}",
+ N->getOperationName()));
+ return NVPTX::Ordering::Release;
case AtomicOrdering::AcquireRelease: {
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "PTX only supports AcquireRelease Ordering on read-modify-write: "
- << N->getOperationName();
- N->print(OS);
- report_fatal_error(OS.str());
+ report_fatal_error(
+ formatv("NVPTX does not support AcquireRelease Ordering on "
+ "read-modify-write "
+ "yet and PTX does not support it on loads or stores: {}",
+ N->getOperationName()));
+ }
+ case AtomicOrdering::SequentiallyConsistent: {
+ // LLVM-IR SequentiallyConsistent atomics map to a two-instruction PTX
+ // sequence including a "fence.sc.sco" and the memory instruction with an
+ // Ordering that differs from "sc": acq, rel, or acq_rel, depending on
+ // whether the memory operation is a read, write, or read-modify-write.
+ //
+ // This sets the ordering of the fence to SequentiallyConsistent, and
+ // sets the corresponding ordering for the instruction.
+ NVPTX::Ordering InstrOrder;
+ if (N->readMem())
+ InstrOrder = NVPTX::Ordering::Acquire;
+ else if (N->writeMem())
+ InstrOrder = NVPTX::Ordering::Release;
+ else
+ report_fatal_error(
+ formatv("NVPTX does not support SequentiallyConsistent Ordering on "
+ "read-modify-writes yet: {}",
+ N->getOperationName()));
+ return OperationOrderings(InstrOrder,
+ NVPTX::Ordering::SequentiallyConsistent);
}
- case AtomicOrdering::SequentiallyConsistent:
- // TODO: support AcquireRelease and SequentiallyConsistent
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
- OS << "NVPTX backend does not support AtomicOrdering \""
- << toIRString(Ordering) << "\" yet.";
- report_fatal_error(OS.str());
}
-
- llvm_unreachable("unexpected unhandled case");
+ report_fatal_error(
+ formatv("NVPTX backend does not support AtomicOrdering \"{}\" yet.",
+ toIRString(Ordering)));
}
+} // namespace
+
static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
unsigned CodeAddrSpace, MachineFunction *F) {
// We use ldg (i.e. ld.global.nc) for invariant loads from the global address
@@ -924,6 +957,35 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
});
}
+NVPTX::Ordering NVPTXDAGToDAGISel::insertMemoryInstructionFence(SDLoc DL,
+ SDValue &Chain,
+ MemSDNode *N) {
+ // Some memory instructions - loads, stores, atomics - need an extra fence
+ // instruction. Get the memory order of the instruction, and that of its
+ // fence, if any.
+ auto [InstructionOrdering, FenceOrdering] =
+ getOperationOrderings(N, Subtarget);
+
+ // If a fence is required before the operation, insert it:
+ switch (NVPTX::Ordering(FenceOrdering)) {
+ case NVPTX::Ordering::NotAtomic:
+ break;
+ case NVPTX::Ordering::SequentiallyConsistent: {
+ unsigned Op = Subtarget->hasMemoryOrdering()
+ ? NVPTX::atomic_thread_fence_seq_cst_sys
+ : NVPTX::INT_MEMBAR_SYS;
+ Chain = SDValue(CurDAG->getMachineNode(Op, DL, MVT::Other, Chain), 0);
+ break;
+ }
+ default:
+ report_fatal_error(
+ formatv("Unexpected fence ordering: \"{}\".",
+ OrderingToCString(NVPTX::Ordering(FenceOrdering))));
+ }
+
+ return InstructionOrdering;
+}
+
bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) {
unsigned IID = N->getConstantOperandVal(0);
switch (IID) {
@@ -1070,17 +1132,15 @@ static int getLdStRegType(EVT VT) {
}
bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
- SDLoc dl(N);
MemSDNode *LD = cast<MemSDNode>(N);
assert(LD->readMem() && "Expected load");
- LoadSDNode *PlainLoad = dyn_cast<LoadSDNode>(N);
- EVT LoadedVT = LD->getMemoryVT();
- SDNode *NVPTXLD = nullptr;
// do not support pre/post inc/dec
+ LoadSDNode *PlainLoad = dyn_cast<LoadSDNode>(N);
if (PlainLoad && PlainLoad->isIndexed())
return false;
+ EVT LoadedVT = LD->getMemoryVT();
if (!LoadedVT.isSimple())
return false;
@@ -1089,13 +1149,13 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
if (canLowerToLDG(LD, *Subtarget, CodeAddrSpace, MF)) {
return tryLDGLDU(N);
}
-
- // Memory Semantic Setting
- unsigned int CodeMemorySem = getCodeMemorySemantic(LD, Subtarget);
-
unsigned int PointerSize =
CurDAG->getDataLayout().getPointerSizeInBits(LD->getAddressSpace());
+ SDLoc DL(N);
+ SDValue Chain = N->getOperand(0);
+ auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, LD);
+
// Type Setting: fromType + fromTypeWidth
//
// Sign : ISD::SEXTLOAD
@@ -1105,45 +1165,42 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
MVT SimpleVT = LoadedVT.getSimpleVT();
MVT ScalarVT = SimpleVT.getScalarType();
// Read at least 8 bits (predicates are stored as 8-bit values)
- unsigned fromTypeWidth = std::max(8U, (unsigned)ScalarVT.getSizeInBits());
- unsigned int fromType;
+ unsigned FromTypeWidth = std::max(8U, (unsigned)ScalarVT.getSizeInBits());
+ unsigned int FromType;
// Vector Setting
- unsigned vecType = NVPTX::PTXLdStInstCode::Scalar;
+ unsigned VecType = NVPTX::PTXLdStInstCode::Scalar;
if (SimpleVT.isVector()) {
assert((Isv2x16VT(LoadedVT) || LoadedVT == MVT::v4i8) &&
"Unexpected vector type");
// v2f16/v2bf16/v2i16 is loaded using ld.b32
- fromTypeWidth = 32;
+ FromTypeWidth = 32;
}
if (PlainLoad && (PlainLoad->getExtensionType() == ISD::SEXTLOAD))
- fromType = NVPTX::PTXLdStInstCode::Signed;
+ FromType = NVPTX::PTXLdStInstCode::Signed;
else
- fromType = getLdStRegType(ScalarVT);
+ FromType = getLdStRegType(ScalarVT);
// Create the machine instruction DAG
- SDValue Chain = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue Addr;
SDValue Offset, Base;
std::optional<unsigned> Opcode;
MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
+ SmallVector<SDValue, 12> Ops({getI32Imm(InstructionOrdering, DL),
+ getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL)});
+
if (SelectDirectAddr(N1, Addr)) {
Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_avar, NVPTX::LD_i16_avar,
NVPTX::LD_i32_avar, NVPTX::LD_i64_avar,
NVPTX::LD_f32_avar, NVPTX::LD_f64_avar);
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(fromType, dl),
- getI32Imm(fromTypeWidth, dl),
- Addr,
- Chain};
- NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
+ Ops.append({Addr, Chain});
} else if (PointerSize == 64 ? SelectADDRsi64(N1.getNode(), N1, Base, Offset)
: SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_i8_asi, NVPTX::LD_i16_asi,
@@ -1151,15 +1208,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
NVPTX::LD_f32_asi, NVPTX::LD_f64_asi);
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(fromType, dl),
- getI32Imm(fromTypeWidth, dl),
- Base,
- Offset,
- Chain};
- NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
+ Ops.append({Base, Offset, Chain});
} else if (PointerSize == 64 ? SelectADDRri64(N1.getNode(), N1, Base, Offset)
: SelectADDRri(N1.getNode(), N1, Base, Offset)) {
if (PointerSize == 64)
@@ -1173,15 +1222,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
NVPTX::LD_f32_ari, NVPTX::LD_f64_ari);
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(fromType, dl),
- getI32Imm(fromTypeWidth, dl),
- Base,
- Offset,
- Chain};
- NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
+ Ops.append({Base, Offset, Chain});
} else {
if (PointerSize == 64)
Opcode =
@@ -1194,16 +1235,11 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
NVPTX::LD_f32_areg, NVPTX::LD_f64_areg);
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(fromType, dl),
- getI32Imm(fromTypeWidth, dl),
- N1,
- Chain};
- NVPTXLD = CurDAG->getMachineNode(*Opcode, dl, TargetVT, MVT::Other, Ops);
+ Ops.append({N1, Chain});
}
+ SDNode *NVPTXLD =
+ CurDAG->getMachineNode(*Opcode, DL, TargetVT, MVT::Other, Ops);
if (!NVPTXLD)
return false;
@@ -1215,16 +1251,8 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
}
bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
-
- SDValue Chain = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- SDValue Addr, Offset, Base;
- std::optional<unsigned> Opcode;
- SDLoc DL(N);
- SDNode *LD;
MemSDNode *MemSD = cast<MemSDNode>(N);
EVT LoadedVT = MemSD->getMemoryVT();
-
if (!LoadedVT.isSimple())
return false;
@@ -1233,12 +1261,12 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
if (canLowerToLDG(MemSD, *Subtarget, CodeAddrSpace, MF)) {
return tryLDGLDU(N);
}
-
unsigned int PointerSize =
CurDAG->getDataLayout().getPointerSizeInBits(MemSD->getAddressSpace());
- // Memory Semantic Setting
- unsigned int CodeMemorySem = getCodeMemorySemantic(MemSD, Subtarget);
+ SDLoc DL(N);
+ SDValue Chain = N->getOperand(0);
+ auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, MemSD);
// Vector Setting
MVT SimpleVT = LoadedVT.getSimpleVT();
@@ -1286,6 +1314,16 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
FromTypeWidth = 32;
}
+ SDValue Op1 = N->getOperand(1);
+ SDValue Addr, Offset, Base;
+ std::optional<unsigned> Opcode;
+ SDNode *LD;
+
+ SmallVector<SDValue, 12> Ops({getI32Imm(InstructionOrdering, DL),
+ getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL)});
+
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
@@ -1305,14 +1343,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
}
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, DL),
- getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL),
- getI32Imm(FromType, DL),
- getI32Imm(FromTypeWidth, DL),
- Addr,
- Chain};
- LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
+ Ops.append({Addr, Chain});
} else if (PointerSize == 64
? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
@@ -1334,15 +1365,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
}
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, DL),
- getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL),
- getI32Imm(FromType, DL),
- getI32Imm(FromTypeWidth, DL),
- Base,
- Offset,
- Chain};
- LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
+ Ops.append({Base, Offset, Chain});
} else if (PointerSize == 64
? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
@@ -1384,16 +1407,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
}
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, DL),
- getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL),
- getI32Imm(FromType, DL),
- getI32Imm(FromTypeWidth, DL),
- Base,
- Offset,
- Chain};
-
- LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
+ Ops.append({Base, Offset, Chain});
} else {
if (PointerSize == 64) {
switch (N->getOpcode()) {
@@ -1434,15 +1448,9 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
}
if (!Opcode)
return false;
- SDValue Ops[] = {getI32Imm(CodeMemorySem, DL),
- getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL),
- getI32Imm(FromType, DL),
- getI32Imm(FromTypeWidth, DL),
- Op1,
- Chain};
- LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
+ Ops.append({Op1, Chain});
}
+ LD = CurDAG->getMachineNode(*Opcode, DL, N->getVTList(), Ops);
MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
CurDAG->setNodeMemRefs(cast<MachineSDNode>(LD), {MemRef});
@@ -1452,8 +1460,6 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
}
bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
-
- SDValue Chain = N->getOperand(0);
SDValue Op1;
MemSDNode *Mem;
bool IsLDG = true;
@@ -1483,12 +1489,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
Mem = cast<MemSDNode>(N);
}
- std::optional<unsigned> Opcode;
- SDLoc DL(N);
- SDNode *LD;
- SDValue Base, Offset, Addr;
EVT OrigType = N->getValueType(0);
-
EVT EltVT = Mem->getMemoryVT();
unsigned NumElts = 1;
if (EltVT.isVector()) {
@@ -1517,6 +1518,12 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
}
InstVTs.push_back(MVT::Other);
SDVTList InstVTList = CurDAG->getVTList(InstVTs);
+ SDValue Chain = N->getOperand(0);
+
+ std::optional<unsigned> Opcode;
+ SDLoc DL(N);
+ SDNode *LD;
+ SDValue Base, Offset, Addr;
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
@@ -1867,19 +1874,17 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
}
bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
- SDLoc dl(N);
MemSDNode *ST = cast<MemSDNode>(N);
assert(ST->writeMem() && "Expected store");
StoreSDNode *PlainStore = dyn_cast<StoreSDNode>(N);
AtomicSDNode *AtomicStore = dyn_cast<AtomicSDNode>(N);
assert((PlainStore || AtomicStore) && "Expected store");
- EVT StoreVT = ST->getMemoryVT();
- SDNode *NVPTXST = nullptr;
// do not support pre/post inc/dec
if (PlainStore && PlainStore->isIndexed())
return false;
+ EVT StoreVT = ST->getMemoryVT();
if (!StoreVT.isSimple())
return false;
@@ -1888,29 +1893,28 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
unsigned int PointerSize =
CurDAG->getDataLayout().getPointerSizeInBits(ST->getAddressSpace());
- // Memory Semantic Setting
- unsigned int CodeMemorySem = getCodeMemorySemantic(ST, Subtarget);
+ SDLoc DL(N);
+ SDValue Chain = ST->getChain();
+ auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, ST);
// Vector Setting
MVT SimpleVT = StoreVT.getSimpleVT();
- unsigned vecType = NVPTX::PTXLdStInstCode::Scalar;
+ unsigned VecType = NVPTX::PTXLdStInstCode::Scalar;
// Type Setting: toType + toTypeWidth
// - for integer type, always use 'u'
- //
MVT ScalarVT = SimpleVT.getScalarType();
- unsigned toTypeWidth = ScalarVT.getSizeInBits();
+ unsigned ToTypeWidth = ScalarVT.getSizeInBits();
if (SimpleVT.isVector()) {
assert((Isv2x16VT(StoreVT) || StoreVT == MVT::v4i8) &&
"Unexpected vector type");
// v2x16 is stored using st.b32
- toTypeWidth = 32;
+ ToTypeWidth = 32;
}
- unsigned int toType = getLdStRegType(ScalarVT);
+ unsigned int ToType = getLdStRegType(ScalarVT);
// Create the machine instruction DAG
- SDValue Chain = ST->getChain();
SDValue Value = PlainStore ? PlainStore->getValue() : AtomicStore->getVal();
SDValue BasePtr = ST->getBasePtr();
SDValue Addr;
@@ -1919,21 +1923,18 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
MVT::SimpleValueType SourceVT =
Value.getNode()->getSimpleValueType(0).SimpleTy;
+ SmallVector<SDValue, 12> Ops({Value, getI32Imm(InstructionOrdering, DL),
+ getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(ToType, DL),
+ getI32Imm(ToTypeWidth, DL)});
+
if (SelectDirectAddr(BasePtr, Addr)) {
Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_avar, NVPTX::ST_i16_avar,
NVPTX::ST_i32_avar, NVPTX::ST_i64_avar,
NVPTX::ST_f32_avar, NVPTX::ST_f64_avar);
if (!Opcode)
return false;
- SDValue Ops[] = {Value,
- getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(toType, dl),
- getI32Imm(toTypeWidth, dl),
- Addr,
- Chain};
- NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
+ Ops.append({Addr, Chain});
} else if (PointerSize == 64
? SelectADDRsi64(BasePtr.getNode(), BasePtr, Base, Offset)
: SelectADDRsi(BasePtr.getNode(), BasePtr, Base, Offset)) {
@@ -1942,16 +1943,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
NVPTX::ST_f32_asi, NVPTX::ST_f64_asi);
if (!Opcode)
return false;
- SDValue Ops[] = {Value,
- getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(toType, dl),
- getI32Imm(toTypeWidth, dl),
- Base,
- Offset,
- Chain};
- NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
+ Ops.append({Base, Offset, Chain});
} else if (PointerSize == 64
? SelectADDRri64(BasePtr.getNode(), BasePtr, Base, Offset)
: SelectADDRri(BasePtr.getNode(), BasePtr, Base, Offset)) {
@@ -1966,17 +1958,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
NVPTX::ST_f32_ari, NVPTX::ST_f64_ari);
if (!Opcode)
return false;
-
- SDValue Ops[] = {Value,
- getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(toType, dl),
- getI32Imm(toTypeWidth, dl),
- Base,
- Offset,
- Chain};
- NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
+ Ops.append({Base, Offset, Chain});
} else {
if (PointerSize == 64)
Opcode =
@@ -1989,17 +1971,12 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
NVPTX::ST_f32_areg, NVPTX::ST_f64_areg);
if (!Opcode)
return false;
- SDValue Ops[] = {Value,
- getI32Imm(CodeMemorySem, dl),
- getI32Imm(CodeAddrSpace, dl),
- getI32Imm(vecType, dl),
- getI32Imm(toType, dl),
- getI32Imm(toTypeWidth, dl),
- BasePtr,
- Chain};
- NVPTXST = CurDAG->getMachineNode(*Opcode, dl, MVT::Other, Ops);
+ Ops.append({BasePtr, Chain});
}
+  SDNode *NVPTXST =
+      CurDAG->getMachineNode(*Opcode, DL, MVT::Other, Ops);
+
if (!NVPTXST)
return false;
@@ -2010,11 +1987,9 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
}
bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
- SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
std::optional<unsigned> Opcode;
- SDLoc DL(N);
SDNode *ST;
EVT EltVT = Op1.getValueType();
MemSDNode *MemSD = cast<MemSDNode>(N);
@@ -2029,8 +2004,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
unsigned int PointerSize =
CurDAG->getDataLayout().getPointerSizeInBits(MemSD->getAddressSpace());
- // Memory Semantic Setting
- unsigned int CodeMemorySem = getCodeMemorySemantic(MemSD, Subtarget);
+ SDLoc DL(N);
+ SDValue Chain = N->getOperand(0);
+ auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, MemSD);
// Type Setting: toType + toTypeWidth
// - for integer type, always use 'u'
@@ -2039,23 +2015,20 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
unsigned ToTypeWidth = ScalarVT.getSizeInBits();
unsigned ToType = getLdStRegType(ScalarVT);
- SmallVector<SDValue, 12> StOps;
+ SmallVector<SDValue, 12> Ops;
SDValue N2;
unsigned VecType;
switch (N->getOpcode()) {
case NVPTXISD::StoreV2:
VecType = NVPTX::PTXLdStInstCode::V2;
- StOps.push_back(N->getOperand(1));
- StOps.push_back(N->getOperand(2));
+ Ops.append({N->getOperand(1), N->getOperand(2)});
N2 = N->getOperand(3);
break;
case NVPTXISD::StoreV4:
VecType = NVPTX::PTXLdStInstCode::V4;
- StOps.push_back(N->getOperand(1));
- StOps.push_back(N->getOperand(2));
- StOps.push_back(N->getOperand(3));
- StOps.push_back(N->getOperand(4));
+ Ops.append({N->getOperand(1), N->getOperand(2), N->getOperand(3),
+ N->getOperand(4)});
N2 = N->getOperand(5);
break;
default:
@@ -2072,11 +2045,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
ToTypeWidth = 32;
}
- StOps.push_back(getI32Imm(CodeMemorySem, DL));
- StOps.push_back(getI32Imm(CodeAddrSpace, DL));
- StOps.push_back(getI32Imm(VecType, DL));
- StOps.push_back(getI32Imm(ToType, DL));
- StOps.push_back(getI32Imm(ToTypeWidth, DL));
+ Ops.append({getI32Imm(InstructionOrdering, DL), getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(ToType, DL),
+ getI32Imm(ToTypeWidth, DL)});
if (SelectDirectAddr(N2, Addr)) {
switch (N->getOpcode()) {
@@ -2095,7 +2066,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
NVPTX::STV_f32_v4_avar, std::nullopt);
break;
}
- StOps.push_back(Addr);
+ Ops.push_back(Addr);
} else if (PointerSize == 64 ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
: SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
switch (N->getOpcode()) {
@@ -2114,8 +2085,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
std::nullopt, NVPTX::STV_f32_v4_asi, std::nullopt);
break;
}
- StOps.push_back(Base);
- StOps.push_back(Offset);
+ Ops.append({Base, Offset});
} else if (PointerSize == 64 ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
: SelectADDRri(N2.getNode(), N2, Base, Offset)) {
if (PointerSize == 64) {
@@ -2154,8 +2124,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
break;
}
}
- StOps.push_back(Base);
- StOps.push_back(Offset);
+ Ops.append({Base, Offset});
} else {
if (PointerSize == 64) {
switch (N->getOpcode()) {
@@ -2194,15 +2163,15 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
break;
}
}
- StOps.push_back(N2);
+ Ops.push_back(N2);
}
if (!Opcode)
return false;
- StOps.push_back(Chain);
+ Ops.push_back(Chain);
- ST = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, StOps);
+ ST = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, Ops);
MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
CurDAG->setNodeMemRefs(cast<MachineSDNode>(ST), {MemRef});
@@ -2276,10 +2245,8 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
unsigned OffsetVal = Offset->getAsZExtVal();
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
- Ops.push_back(Chain);
- Ops.push_back(Glue);
+ SmallVector<SDValue, 2> Ops(
+ {CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32), Chain, Glue});
ReplaceNode(Node, CurDAG->getMachineNode(*Opcode, DL, VTs, Ops));
return true;
@@ -2312,8 +2279,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SmallVector<SDValue, 6> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(N->getOperand(i + 2));
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
- Ops.push_back(Chain);
+ Ops.append({CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32), Chain});
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
@@ -2493,10 +2459,8 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(N->getOperand(i + 3));
- Ops.push_back(CurDAG->getTargetConstant(ParamVal, DL, MVT::i32));
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
- Ops.push_back(Chain);
- Ops.push_back(Glue);
+ Ops.append({CurDAG->getTargetConstant(ParamVal, DL, MVT::i32),
+ CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32), Chain, Glue});
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index 49626d4..eac4056 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -99,6 +99,9 @@ private:
bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const;
static unsigned GetConvertOpcode(MVT DestTy, MVT SrcTy, LoadSDNode *N);
+
+ NVPTX::Ordering insertMemoryInstructionFence(SDLoc DL, SDValue &Chain,
+ MemSDNode *N);
};
class NVPTXDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index a5bdc6f..6a096fa 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -3926,7 +3926,6 @@ def : Pat<(atomic_fence (i64 6), (i64 1)), (atomic_thread_fence_acq_rel_sys)>, /
def : Pat<(atomic_fence (i64 7), (i64 1)), (atomic_thread_fence_seq_cst_sys)>, // seq_cst(7) sys(1)
Requires<[hasPTX<60>, hasSM<70>]>;
-
// If PTX<60 or SM<70, we fall back to MEMBAR:
def : Pat<(atomic_fence (i64 4), (i64 1)), (INT_MEMBAR_SYS)>; // acquire(4) sys(1)
def : Pat<(atomic_fence (i64 5), (i64 1)), (INT_MEMBAR_SYS)>; // release(5) sys(1)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index c81dfa6..887951b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -335,6 +335,48 @@ def INT_FENCE_SC_CLUSTER:
MEMBAR<"fence.sc.cluster;", int_nvvm_fence_sc_cluster>,
Requires<[hasPTX<78>, hasSM<90>]>;
+// Proxy fence (uni-directional)
+// fence.proxy.tensormap.release variants
+
+class FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<string Scope, Intrinsic Intr> :
+ NVPTXInst<(outs), (ins),
+ "fence.proxy.tensormap::generic.release." # Scope # ";", [(Intr)]>,
+ Requires<[hasPTX<83>, hasSM<90>]>;
+
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_CTA:
+ FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<"cta",
+ int_nvvm_fence_proxy_tensormap_generic_release_cta>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_CLUSTER:
+ FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<"cluster",
+ int_nvvm_fence_proxy_tensormap_generic_release_cluster>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_GPU:
+ FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<"gpu",
+ int_nvvm_fence_proxy_tensormap_generic_release_gpu>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_SYS:
+ FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<"sys",
+ int_nvvm_fence_proxy_tensormap_generic_release_sys>;
+
+// fence.proxy.tensormap.acquire variants
+
+class FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<string Scope, Intrinsic Intr> :
+ NVPTXInst<(outs), (ins Int64Regs:$addr),
+ "fence.proxy.tensormap::generic.acquire." # Scope # " [$addr], 128;",
+ [(Intr Int64Regs:$addr, (i32 128))]>,
+ Requires<[hasPTX<83>, hasSM<90>]>;
+
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_CTA :
+ FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<"cta",
+ int_nvvm_fence_proxy_tensormap_generic_acquire_cta>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_CLUSTER :
+ FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<"cluster",
+ int_nvvm_fence_proxy_tensormap_generic_acquire_cluster>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_GPU :
+ FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<"gpu",
+ int_nvvm_fence_proxy_tensormap_generic_acquire_gpu>;
+def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_SYS :
+ FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<"sys",
+ int_nvvm_fence_proxy_tensormap_generic_acquire_sys>;
+
//-----------------------------------
// Async Copy Functions
//-----------------------------------
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index c15ff6c..eebd91f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXUTILITIES_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXUTILITIES_H
+#include "NVPTX.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
@@ -82,6 +83,36 @@ inline unsigned promoteScalarArgumentSize(unsigned size) {
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM);
bool Isv2x16VT(EVT VT);
+
+namespace NVPTX {
+
+inline std::string OrderingToCString(Ordering Order) {
+ switch (Order) {
+ case Ordering::NotAtomic:
+ return "NotAtomic";
+ case Ordering::Relaxed:
+ return "Relaxed";
+ case Ordering::Acquire:
+ return "Acquire";
+ case Ordering::Release:
+ return "Release";
+ // case Ordering::AcquireRelease: return "AcquireRelease";
+ case Ordering::SequentiallyConsistent:
+ return "SequentiallyConsistent";
+ case Ordering::Volatile:
+ return "Volatile";
+ case Ordering::RelaxedMMIO:
+ return "RelaxedMMIO";
+ }
+ report_fatal_error("unknown ordering");
+}
+
+inline raw_ostream &operator<<(raw_ostream &O, Ordering Order) {
+ O << OrderingToCString(Order);
+ return O;
}
+} // namespace NVPTX
+} // namespace llvm
+
#endif
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 1963582..a57ed33 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1007,7 +1007,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// R0 cannot be used as a base register, but it can be used as an
// index in a store-indexed.
int LastOffset = 0;
- if (HasFP) {
+ if (HasFP) {
// R0 += (FPOffset-LastOffset).
// Need addic, since addi treats R0 as 0.
BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
@@ -2025,8 +2025,18 @@ void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
// code. Same goes for the base pointer and the PIC base register.
if (needsFP(MF))
SavedRegs.reset(isPPC64 ? PPC::X31 : PPC::R31);
- if (RegInfo->hasBasePointer(MF))
+ if (RegInfo->hasBasePointer(MF)) {
SavedRegs.reset(RegInfo->getBaseRegister(MF));
+ // On AIX, when BaseRegister(R30) is used, need to spill r31 too to match
+ // AIX traceback table requirement.
+ if (!needsFP(MF) && !SavedRegs.test(isPPC64 ? PPC::X31 : PPC::R31) &&
+ Subtarget.isAIXABI()) {
+ assert(
+ (RegInfo->getBaseRegister(MF) == (isPPC64 ? PPC::X30 : PPC::R30)) &&
+ "Invalid base register on AIX!");
+ SavedRegs.set(isPPC64 ? PPC::X31 : PPC::R31);
+ }
+ }
if (FI->usesPICBase())
SavedRegs.reset(PPC::R30);
diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 8f5afba..0177034 100644
--- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1014,12 +1014,14 @@ def POPCNTB8 : XForm_11<31, 122, (outs g8rc:$RA), (ins g8rc:$RST),
[(set i64:$RA, (int_ppc_popcntb i64:$RST))]>;
def CDTBCD8 : XForm_11<31, 282, (outs g8rc:$RA), (ins g8rc:$RST),
- "cdtbcd $RA, $RST", IIC_IntGeneral, []>;
+ "cdtbcd $RA, $RST", IIC_IntGeneral,
+ [(set i64:$RA, (int_ppc_cdtbcdd i64:$RST))]>;
def CBCDTD8 : XForm_11<31, 314, (outs g8rc:$RA), (ins g8rc:$RST),
- "cbcdtd $RA, $RST", IIC_IntGeneral, []>;
-
+ "cbcdtd $RA, $RST", IIC_IntGeneral,
+ [(set i64:$RA, (int_ppc_cbcdtdd i64:$RST))]>;
def ADDG6S8 : XOForm_1<31, 74, 0, (outs g8rc:$RT), (ins g8rc:$RA, g8rc:$RB),
- "addg6s $RT, $RA, $RB", IIC_IntGeneral, []>;
+ "addg6s $RT, $RA, $RB", IIC_IntGeneral,
+ [(set i64:$RT, (int_ppc_addg6sd i64:$RA, i64:$RB))]>;
}
defm DIVD : XOForm_1rcr<31, 489, 0, (outs g8rc:$RT), (ins g8rc:$RA, g8rc:$RB),
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 1686249..411ea77 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -1931,12 +1931,14 @@ def POPCNTB : XForm_11<31, 122, (outs gprc:$RA), (ins gprc:$RST),
[(set i32:$RA, (int_ppc_popcntb i32:$RST))]>;
def CDTBCD : XForm_11<31, 282, (outs gprc:$RA), (ins gprc:$RST),
- "cdtbcd $RA, $RST", IIC_IntGeneral, []>;
+ "cdtbcd $RA, $RST", IIC_IntGeneral,
+ [(set i32:$RA, (int_ppc_cdtbcd i32:$RST))]>;
def CBCDTD : XForm_11<31, 314, (outs gprc:$RA), (ins gprc:$RST),
- "cbcdtd $RA, $RST", IIC_IntGeneral, []>;
-
+ "cbcdtd $RA, $RST", IIC_IntGeneral,
+ [(set i32:$RA, (int_ppc_cbcdtd i32:$RST))]>;
def ADDG6S : XOForm_1<31, 74, 0, (outs gprc:$RT), (ins gprc:$RA, gprc:$RB),
- "addg6s $RT, $RA, $RB", IIC_IntGeneral, []>;
+ "addg6s $RT, $RA, $RB", IIC_IntGeneral,
+ [(set i32:$RT, (int_ppc_addg6s i32:$RA, i32:$RB))]>;
//===----------------------------------------------------------------------===//
// PPC32 Load Instructions.
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index f28a709..5146e51 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -36,6 +36,7 @@ add_llvm_target(RISCVCodeGen
RISCVExpandPseudoInsts.cpp
RISCVFrameLowering.cpp
RISCVGatherScatterLowering.cpp
+ RISCVIndirectBranchTracking.cpp
RISCVInsertVSETVLI.cpp
RISCVInsertReadWriteCSR.cpp
RISCVInsertWriteVXRM.cpp
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 0d2473c..80cb395 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -31,6 +31,9 @@ void initializeRISCVCodeGenPreparePass(PassRegistry &);
FunctionPass *createRISCVDeadRegisterDefinitionsPass();
void initializeRISCVDeadRegisterDefinitionsPass(PassRegistry &);
+FunctionPass *createRISCVIndirectBranchTrackingPass();
+void initializeRISCVIndirectBranchTrackingPass(PassRegistry &);
+
FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM,
CodeGenOptLevel OptLevel);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 43c04d4..604234b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1461,7 +1461,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue X = N0.getOperand(0);
- if (isMask_64(C1)) {
+ // Prefer SRAIW + ANDI when possible.
+ bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
+ X.getOpcode() == ISD::SHL &&
+ isa<ConstantSDNode>(X.getOperand(1)) &&
+ X.getConstantOperandVal(1) == 32;
+ if (isMask_64(C1) && !Skip) {
unsigned Leading = XLen - llvm::bit_width(C1);
if (C2 > Leading) {
SDNode *SRAI = CurDAG->getMachineNode(
diff --git a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
new file mode 100644
index 0000000..1b484d4
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
@@ -0,0 +1,102 @@
+//===------ RISCVIndirectBranchTracking.cpp - Enables lpad mechanism ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the
+// beginning of each basic block or function that is referenced by an indirect
+// jump/call instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+
+using namespace llvm;
+
+static cl::opt<uint32_t> PreferredLandingPadLabel(
+ "riscv-landing-pad-label", cl::ReallyHidden,
+ cl::desc("Use preferred fixed label for all labels"));
+
+namespace {
+class RISCVIndirectBranchTrackingPass : public MachineFunctionPass {
+public:
+ RISCVIndirectBranchTrackingPass() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "RISC-V Indirect Branch Tracking";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ static char ID;
+ const Align LpadAlign = Align(4);
+};
+
+} // end anonymous namespace
+
+char RISCVIndirectBranchTrackingPass::ID = 0;
+
+FunctionPass *llvm::createRISCVIndirectBranchTrackingPass() {
+ return new RISCVIndirectBranchTrackingPass();
+}
+
+static void emitLpad(MachineBasicBlock &MBB, const RISCVInstrInfo *TII,
+ uint32_t Label) {
+ auto I = MBB.begin();
+ BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(RISCV::AUIPC), RISCV::X0)
+ .addImm(Label);
+}
+
+bool RISCVIndirectBranchTrackingPass::runOnMachineFunction(
+ MachineFunction &MF) {
+ const auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+ const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
+ if (!Subtarget.hasStdExtZicfilp())
+ return false;
+
+ uint32_t FixedLabel = 0;
+ if (PreferredLandingPadLabel.getNumOccurrences() > 0) {
+ if (!isUInt<20>(PreferredLandingPadLabel))
+ report_fatal_error("riscv-landing-pad-label=<val>, <val> needs to fit in "
+ "unsigned 20-bits");
+ FixedLabel = PreferredLandingPadLabel;
+ }
+
+ bool Changed = false;
+ for (MachineBasicBlock &MBB : MF) {
+ if (&MBB == &MF.front()) {
+ Function &F = MF.getFunction();
+ // When trap is taken, landing pad is not needed.
+ if (F.hasFnAttribute("interrupt"))
+ continue;
+
+ if (F.hasAddressTaken() || !F.hasLocalLinkage()) {
+ emitLpad(MBB, TII, FixedLabel);
+ if (MF.getAlignment() < LpadAlign)
+ MF.setAlignment(LpadAlign);
+ Changed = true;
+ }
+ continue;
+ }
+
+ if (MBB.hasAddressTaken()) {
+ emitLpad(MBB, TII, FixedLabel);
+ if (MBB.getAlignment() < LpadAlign)
+ MBB.setAlignment(LpadAlign);
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b5dbc05..86b30e8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -371,6 +371,9 @@ defset list<VTypeInfo> AllVectors = {
}
}
+defvar AllFloatVectorsExceptFP16 = !filter(vti, AllFloatVectors, !ne(vti.Scalar, f16));
+defvar AllFP16Vectors = !filter(vti, AllFloatVectors, !eq(vti.Scalar, f16));
+
// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type
class GetIntVTypeInfo<VTypeInfo vti> {
@@ -7245,6 +7248,14 @@ foreach vti = AllFloatVectors in {
vti.RegClass, vti.ScalarRegClass>;
}
+foreach vti = AllBFloatVectors in
+ let Predicates = [HasVInstructionsBF16Minimal] in
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
+ vti.Vector,
+ vti.Vector, vti.Vector, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, vti.RegClass>;
+
foreach fvti = AllFloatVectors in {
defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
let Predicates = GetVTypePredicates<fvti>.Predicates in
@@ -7445,14 +7456,22 @@ defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
eew=16, vtilist=AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
- AllFloatVectors, uimm5>;
+ AllFloatVectorsExceptFP16, uimm5>;
+let Predicates = [HasVInstructionsF16Minimal] in
+ defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllFP16Vectors, uimm5>;
+defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllBFloatVectors, uimm5>;
defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
eew=16, vtilist=AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 16.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
-defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectorsExceptFP16>;
+let Predicates = [HasVInstructionsF16Minimal] in
+ defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFP16Vectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBFloatVectors>;
// Include the non-intrinsic ISel patterns
include "RISCVInstrInfoVVLPatterns.td"
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 83d27b3..cc40d6a 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -239,6 +239,12 @@ def SIFIVE_X280 : RISCVProcessorModel<"sifive-x280", SiFive7Model,
FeatureStdExtZbb],
SiFiveX280TuneFeatures>;
+defvar SiFiveP400TuneFeatures = [TuneNoDefaultUnroll,
+ TuneConditionalCompressedMoveFusion,
+ TuneLUIADDIFusion,
+ TuneAUIPCADDIFusion,
+ FeaturePostRAScheduler];
+
def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
[Feature64Bit,
FeatureStdExtI,
@@ -266,11 +272,26 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
FeatureStdExtZfhmin,
FeatureUnalignedScalarMem,
FeatureUnalignedVectorMem],
- [TuneNoDefaultUnroll,
- TuneConditionalCompressedMoveFusion,
- TuneLUIADDIFusion,
- TuneAUIPCADDIFusion,
- FeaturePostRAScheduler]>;
+ SiFiveP400TuneFeatures>;
+
+def SIFIVE_P470 : RISCVProcessorModel<"sifive-p470", SiFiveP400Model,
+ !listconcat(RVA22U64Features,
+ [FeatureStdExtV,
+ FeatureStdExtZifencei,
+ FeatureStdExtZihintntl,
+ FeatureStdExtZvl128b,
+ FeatureStdExtZvbb,
+ FeatureStdExtZvknc,
+ FeatureStdExtZvkng,
+ FeatureStdExtZvksc,
+ FeatureStdExtZvksg,
+ FeatureVendorXSiFivecdiscarddlone,
+ FeatureVendorXSiFivecflushdlone,
+ FeatureUnalignedScalarMem,
+ FeatureUnalignedVectorMem]),
+ !listconcat(SiFiveP400TuneFeatures,
+ [TuneNoSinkSplatOperands])>;
+
def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
[Feature64Bit,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 21fbf47..8b3770a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -520,6 +520,7 @@ void RISCVPassConfig::addPreEmitPass2() {
// ensuring return instruction is detected correctly.
addPass(createRISCVPushPopOptimizationPass());
}
+ addPass(createRISCVIndirectBranchTrackingPass());
addPass(createRISCVExpandPseudoPass());
// Schedule the expansion of AMOs at the last possible moment, avoiding the
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
index 53bac88..04eada1 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
@@ -186,10 +186,11 @@ struct Entry {
/// Explore them.
static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
const WebAssemblyExceptionInfo &WEI,
- const MachineDominatorTree &MDT) {
+ MachineDominatorTree &MDT) {
// Remember original layout ordering, so we can update terminators after
// reordering to point to the original layout successor.
MF.RenumberBlocks();
+ MDT.updateBlockNumbers();
// Prepare for a topological sort: Record the number of predecessors each
// block has, ignoring loop backedges.
@@ -330,6 +331,7 @@ static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
}
assert(Entries.empty() && "Active sort region list not finished");
MF.RenumberBlocks();
+ MDT.updateBlockNumbers();
#ifndef NDEBUG
SmallSetVector<const SortRegion *, 8> OnStack;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
index 70b91c2..c7001ef 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -45,6 +45,8 @@ STATISTIC(NumCatchUnwindMismatches, "Number of catch unwind mismatches found");
namespace {
class WebAssemblyCFGStackify final : public MachineFunctionPass {
+ MachineDominatorTree *MDT;
+
StringRef getPassName() const override { return "WebAssembly CFG Stackify"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -252,7 +254,6 @@ void WebAssemblyCFGStackify::unregisterScope(MachineInstr *Begin) {
void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
assert(!MBB.isEHPad());
MachineFunction &MF = *MBB.getParent();
- auto &MDT = getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
@@ -264,7 +265,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
int MBBNumber = MBB.getNumber();
for (MachineBasicBlock *Pred : MBB.predecessors()) {
if (Pred->getNumber() < MBBNumber) {
- Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
+ Header = Header ? MDT->findNearestCommonDominator(Header, Pred) : Pred;
if (explicitlyBranchesTo(Pred, &MBB))
IsBranchedTo = true;
}
@@ -1439,6 +1440,7 @@ void WebAssemblyCFGStackify::recalculateScopeTops(MachineFunction &MF) {
// Renumber BBs and recalculate ScopeTop info because new BBs might have been
// created and inserted during fixing unwind mismatches.
MF.RenumberBlocks();
+ MDT->updateBlockNumbers();
ScopeTops.clear();
ScopeTops.resize(MF.getNumBlockIDs());
for (auto &MBB : reverse(MF)) {
@@ -1741,6 +1743,7 @@ bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
"********** Function: "
<< MF.getName() << '\n');
const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo();
+ MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
releaseMemory();
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 6289b3a..831944c 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -41,13 +41,6 @@ static cl::opt<bool> DisableX86DomainReassignment(
namespace {
enum RegDomain { NoDomain = -1, GPRDomain, MaskDomain, OtherDomain, NumDomains };
-static bool isGPR(const TargetRegisterClass *RC) {
- return X86::GR64RegClass.hasSubClassEq(RC) ||
- X86::GR32RegClass.hasSubClassEq(RC) ||
- X86::GR16RegClass.hasSubClassEq(RC) ||
- X86::GR8RegClass.hasSubClassEq(RC);
-}
-
static bool isMask(const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) {
return X86::VK16RegClass.hasSubClassEq(RC);
@@ -55,7 +48,7 @@ static bool isMask(const TargetRegisterClass *RC,
static RegDomain getDomain(const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) {
- if (isGPR(RC))
+ if (TRI->isGeneralPurposeRegisterClass(RC))
return GPRDomain;
if (isMask(RC, TRI))
return MaskDomain;
@@ -797,7 +790,8 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
continue;
// GPR only current source domain supported.
- if (!isGPR(MRI->getRegClass(Reg)))
+ if (!MRI->getTargetRegisterInfo()->isGeneralPurposeRegisterClass(
+ MRI->getRegClass(Reg)))
continue;
// Register already in closure.
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index bdc9a0d..77dac119 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -4227,3 +4227,323 @@ void X86FrameLowering::restoreWinEHStackPointersInParent(
/*RestoreSP=*/IsSEH);
}
}
+
+// Compute the alignment gap between current SP after spilling FP/BP and the
+// next properly aligned stack offset.
+static int computeFPBPAlignmentGap(MachineFunction &MF,
+ const TargetRegisterClass *RC,
+ unsigned NumSpilledRegs) {
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ unsigned AllocSize = TRI->getSpillSize(*RC) * NumSpilledRegs;
+ Align StackAlign = MF.getSubtarget().getFrameLowering()->getStackAlign();
+ unsigned AlignedSize = alignTo(AllocSize, StackAlign);
+ return AlignedSize - AllocSize;
+}
+
+void X86FrameLowering::spillFPBPUsingSP(MachineFunction &MF,
+ MachineBasicBlock::iterator BeforeMI,
+ Register FP, Register BP,
+ int SPAdjust) const {
+ assert(FP.isValid() || BP.isValid());
+
+ MachineBasicBlock *MBB = BeforeMI->getParent();
+ DebugLoc DL = BeforeMI->getDebugLoc();
+
+ // Spill FP.
+ if (FP.isValid()) {
+ BuildMI(*MBB, BeforeMI, DL,
+ TII.get(getPUSHOpcode(MF.getSubtarget<X86Subtarget>())))
+ .addReg(FP);
+ }
+
+ // Spill BP.
+ if (BP.isValid()) {
+ BuildMI(*MBB, BeforeMI, DL,
+ TII.get(getPUSHOpcode(MF.getSubtarget<X86Subtarget>())))
+ .addReg(BP);
+ }
+
+ // Make sure SP is aligned.
+ if (SPAdjust)
+ emitSPUpdate(*MBB, BeforeMI, DL, -SPAdjust, false);
+
+ // Emit unwinding information.
+ if (FP.isValid() && needsDwarfCFI(MF)) {
+ // Emit .cfi_remember_state to remember old frame.
+ unsigned CFIIndex =
+ MF.addFrameInst(MCCFIInstruction::createRememberState(nullptr));
+ BuildMI(*MBB, BeforeMI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Setup new CFA value with DW_CFA_def_cfa_expression:
+ // DW_OP_breg7+offset, DW_OP_deref, DW_OP_consts 16, DW_OP_plus
+ SmallString<64> CfaExpr;
+ uint8_t buffer[16];
+ int Offset = SPAdjust;
+ if (BP.isValid())
+ Offset += TRI->getSpillSize(*TRI->getMinimalPhysRegClass(BP));
+ // If BeforeMI is a frame setup instruction, we need to adjust the position
+ // and offset of the new cfi instruction.
+ if (TII.isFrameSetup(*BeforeMI)) {
+ Offset += alignTo(TII.getFrameSize(*BeforeMI), getStackAlign());
+ BeforeMI = std::next(BeforeMI);
+ }
+ Register StackPtr = TRI->getStackRegister();
+ if (STI.isTarget64BitILP32())
+ StackPtr = Register(getX86SubSuperRegister(StackPtr, 64));
+ unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackPtr, true);
+ CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfStackPtr));
+ CfaExpr.append(buffer, buffer + encodeSLEB128(Offset, buffer));
+ CfaExpr.push_back(dwarf::DW_OP_deref);
+ CfaExpr.push_back(dwarf::DW_OP_consts);
+ CfaExpr.append(buffer, buffer + encodeSLEB128(SlotSize * 2, buffer));
+ CfaExpr.push_back((uint8_t)dwarf::DW_OP_plus);
+
+ SmallString<64> DefCfaExpr;
+ DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+ DefCfaExpr.append(buffer, buffer + encodeSLEB128(CfaExpr.size(), buffer));
+ DefCfaExpr.append(CfaExpr.str());
+ BuildCFI(*MBB, BeforeMI, DL,
+ MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str()),
+ MachineInstr::FrameSetup);
+ }
+}
+
+void X86FrameLowering::restoreFPBPUsingSP(MachineFunction &MF,
+ MachineBasicBlock::iterator AfterMI,
+ Register FP, Register BP,
+ int SPAdjust) const {
+ assert(FP.isValid() || BP.isValid());
+
+ // Adjust SP so it points to spilled FP or BP.
+ MachineBasicBlock *MBB = AfterMI->getParent();
+ MachineBasicBlock::iterator Pos = std::next(AfterMI);
+ DebugLoc DL = AfterMI->getDebugLoc();
+ if (SPAdjust)
+ emitSPUpdate(*MBB, Pos, DL, SPAdjust, false);
+
+ // Restore BP.
+ if (BP.isValid()) {
+ BuildMI(*MBB, Pos, DL,
+ TII.get(getPOPOpcode(MF.getSubtarget<X86Subtarget>())), BP);
+ }
+
+ // Restore FP.
+ if (FP.isValid()) {
+ BuildMI(*MBB, Pos, DL,
+ TII.get(getPOPOpcode(MF.getSubtarget<X86Subtarget>())), FP);
+
+ // Emit unwinding information.
+ if (needsDwarfCFI(MF)) {
+ // Restore original frame with .cfi_restore_state.
+ unsigned CFIIndex =
+ MF.addFrameInst(MCCFIInstruction::createRestoreState(nullptr));
+ BuildMI(*MBB, Pos, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
+}
+
+void X86FrameLowering::saveAndRestoreFPBPUsingSP(
+ MachineFunction &MF, MachineBasicBlock::iterator BeforeMI,
+ MachineBasicBlock::iterator AfterMI, bool SpillFP, bool SpillBP) const {
+ assert(SpillFP || SpillBP);
+
+ Register FP, BP;
+ const TargetRegisterClass *RC;
+ unsigned NumRegs = 0;
+
+ if (SpillFP) {
+ FP = TRI->getFrameRegister(MF);
+ if (STI.isTarget64BitILP32())
+ FP = Register(getX86SubSuperRegister(FP, 64));
+ RC = TRI->getMinimalPhysRegClass(FP);
+ ++NumRegs;
+ }
+ if (SpillBP) {
+ BP = TRI->getBaseRegister();
+ if (STI.isTarget64BitILP32())
+ BP = Register(getX86SubSuperRegister(BP, 64));
+ RC = TRI->getMinimalPhysRegClass(BP);
+ ++NumRegs;
+ }
+ int SPAdjust = computeFPBPAlignmentGap(MF, RC, NumRegs);
+
+ spillFPBPUsingSP(MF, BeforeMI, FP, BP, SPAdjust);
+ restoreFPBPUsingSP(MF, AfterMI, FP, BP, SPAdjust);
+}
+
+bool X86FrameLowering::skipSpillFPBP(
+ MachineFunction &MF, MachineBasicBlock::reverse_iterator &MI) const {
+ if (MI->getOpcode() == X86::LCMPXCHG16B_SAVE_RBX) {
+ // The pseudo instruction LCMPXCHG16B_SAVE_RBX is generated in the form
+ // SaveRbx = COPY RBX
+ // SaveRbx = LCMPXCHG16B_SAVE_RBX ..., SaveRbx, implicit-def rbx
+ // And later LCMPXCHG16B_SAVE_RBX is expanded to restore RBX from SaveRbx.
+ // We should skip this instruction sequence.
+ int FI;
+ unsigned Reg;
+ while (!(MI->getOpcode() == TargetOpcode::COPY &&
+ MI->getOperand(1).getReg() == X86::RBX) &&
+ !((Reg = TII.isStoreToStackSlot(*MI, FI)) && Reg == X86::RBX))
+ ++MI;
+ return true;
+ }
+ return false;
+}
+
+static bool isFPBPAccess(const MachineInstr &MI, Register FP, Register BP,
+ const TargetRegisterInfo *TRI, bool &AccessFP,
+ bool &AccessBP) {
+ AccessFP = AccessBP = false;
+ if (FP) {
+ if (MI.findRegisterUseOperandIdx(FP, TRI, false) != -1 ||
+ MI.findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
+ AccessFP = true;
+ }
+ if (BP) {
+ if (MI.findRegisterUseOperandIdx(BP, TRI, false) != -1 ||
+ MI.findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
+ AccessBP = true;
+ }
+ return AccessFP || AccessBP;
+}
+
+// An invoke instruction has been lowered to a normal function call. We try to
+// figure out if MI comes from an invoke.
+// FIXME: Is there a better way to detect this?
+static bool isInvoke(const MachineInstr &MI, bool InsideEHLabels) {
+ if (!MI.isCall())
+ return false;
+ if (InsideEHLabels)
+ return true;
+
+ const MachineBasicBlock *MBB = MI.getParent();
+ if (!MBB->hasEHPadSuccessor())
+ return false;
+
+ // Check if there is another call instruction from MI to the end of MBB.
+ MachineBasicBlock::const_iterator MBBI = MI, ME = MBB->end();
+ for (++MBBI; MBBI != ME; ++MBBI)
+ if (MBBI->isCall())
+ return false;
+ return true;
+}
+
+/// If a function uses a base pointer and the base pointer is clobbered by
+/// inline asm, RA doesn't detect this case, and after the inline asm the base
+/// pointer contains a garbage value.
+/// For example, if a 32-bit x86 function uses the base pointer esi, and esi is
+/// clobbered by the following inline asm
+/// asm("rep movsb" : "+D"(ptr), "+S"(x), "+c"(c)::"memory");
+/// we need to save esi before the asm and restore it after the asm.
+///
+/// The same problem can also occur with the frame pointer if there is a
+/// function call, and the callee uses a different calling convention and
+/// clobbers the fp.
+///
+/// Because normal frame objects (spill slots) are accessed through the fp/bp
+/// register, we can't spill fp/bp to normal spill slots.
+///
+/// FIXME: There are 2 possible enhancements:
+/// 1. In many cases there are different physical registers not clobbered by
+/// inline asm, we can use one of them as base pointer. Or use a virtual
+/// register as base pointer and let RA allocate a physical register to it.
+/// 2. If there is no other instructions access stack with fp/bp from the
+/// inline asm to the epilog, and no cfi requirement for a correct fp, we can
+/// skip the save and restore operations.
+void X86FrameLowering::spillFPBP(MachineFunction &MF) const {
+ Register FP, BP;
+ const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
+ if (TFI.hasFP(MF))
+ FP = TRI->getFrameRegister(MF);
+ if (TRI->hasBasePointer(MF))
+ BP = TRI->getBaseRegister();
+ if (!FP && !BP)
+ return;
+
+ for (MachineBasicBlock &MBB : MF) {
+ bool InsideEHLabels = false;
+ auto MI = MBB.rbegin(), ME = MBB.rend();
+ auto TermMI = MBB.getFirstTerminator();
+ if (TermMI != MBB.begin())
+ MI = *(std::prev(TermMI));
+
+ while (MI != ME) {
+ // Skip frame setup/destroy instructions.
+ // Skip Invoke (call inside try block) instructions.
+ // Skip instructions handled by target.
+ if (MI->getFlag(MachineInstr::MIFlag::FrameSetup) ||
+ MI->getFlag(MachineInstr::MIFlag::FrameDestroy) ||
+ isInvoke(*MI, InsideEHLabels) || skipSpillFPBP(MF, MI)) {
+ ++MI;
+ continue;
+ }
+
+ if (MI->getOpcode() == TargetOpcode::EH_LABEL) {
+ InsideEHLabels = !InsideEHLabels;
+ ++MI;
+ continue;
+ }
+
+ bool AccessFP, AccessBP;
+ // Check if fp or bp is used in MI.
+ if (!isFPBPAccess(*MI, FP, BP, TRI, AccessFP, AccessBP)) {
+ ++MI;
+ continue;
+ }
+
+ // Look for the range [DefMI, KillMI] in which fp or bp is defined and
+ // used.
+ bool FPLive = false, BPLive = false;
+ bool SpillFP = false, SpillBP = false;
+ auto DefMI = MI, KillMI = MI;
+ do {
+ SpillFP |= AccessFP;
+ SpillBP |= AccessBP;
+
+ // Maintain FPLive and BPLive.
+ if (FPLive && MI->findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
+ FPLive = false;
+ if (FP && MI->findRegisterUseOperandIdx(FP, TRI, false) != -1)
+ FPLive = true;
+ if (BPLive && MI->findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
+ BPLive = false;
+ if (BP && MI->findRegisterUseOperandIdx(BP, TRI, false) != -1)
+ BPLive = true;
+
+ DefMI = MI++;
+ } while ((MI != ME) &&
+ (FPLive || BPLive ||
+ isFPBPAccess(*MI, FP, BP, TRI, AccessFP, AccessBP)));
+
+ // Don't need to save/restore if FP is accessed through llvm.frameaddress.
+ if (FPLive && !SpillBP)
+ continue;
+
+ // If the bp is clobbered by a call, we should save and restore outside of
+ // the frame setup instructions.
+ if (KillMI->isCall() && DefMI != ME) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ auto FrameSetup = std::next(DefMI);
+ // Look for frame setup instruction toward the start of the BB.
+ // If we reach another call instruction, it means no frame setup
+ // instruction for the current call instruction.
+ while (FrameSetup != ME && !TII.isFrameSetup(*FrameSetup) &&
+ !FrameSetup->isCall())
+ ++FrameSetup;
+ // If a frame setup instruction is found, we need to find out the
+ // corresponding frame destroy instruction.
+ if (FrameSetup != ME && TII.isFrameSetup(*FrameSetup)) {
+ while (!TII.isFrameInstr(*KillMI))
+ --KillMI;
+ DefMI = FrameSetup;
+ MI = DefMI;
+ ++MI;
+ }
+ }
+
+ // Call target function to spill and restore FP and BP registers.
+ saveAndRestoreFPBPUsingSP(MF, &(*DefMI), &(*KillMI), SpillFP, SpillBP);
+ }
+ }
+}
diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h
index 2dc9ecc..e21f6ab 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/llvm/lib/Target/X86/X86FrameLowering.h
@@ -103,6 +103,8 @@ public:
MutableArrayRef<CalleeSavedInfo> CSI,
const TargetRegisterInfo *TRI) const override;
+ void spillFPBP(MachineFunction &MF) const override;
+
bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
@@ -267,6 +269,29 @@ private:
void emitCatchRetReturnValue(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineInstr *CatchRet) const;
+
+ /// Issue instructions to allocate stack space and spill frame pointer and/or
+ /// base pointer to stack using stack pointer register.
+ void spillFPBPUsingSP(MachineFunction &MF,
+ const MachineBasicBlock::iterator BeforeMI, Register FP,
+ Register BP, int SPAdjust) const;
+
+ /// Issue instructions to restore frame pointer and/or base pointer from stack
+ /// using stack pointer register, and free stack space.
+ void restoreFPBPUsingSP(MachineFunction &MF,
+ const MachineBasicBlock::iterator AfterMI,
+ Register FP, Register BP, int SPAdjust) const;
+
+ void saveAndRestoreFPBPUsingSP(MachineFunction &MF,
+ MachineBasicBlock::iterator BeforeMI,
+ MachineBasicBlock::iterator AfterMI,
+ bool SpillFP, bool SpillBP) const;
+
+  // If MI uses fp/bp but the target can handle the access itself and does not
+  // want it spilled again, this function should return true and update MI so
+  // that we will not check any instructions from the related sequence.
+ bool skipSpillFPBP(MachineFunction &MF,
+ MachineBasicBlock::reverse_iterator &MI) const;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fff65a1..2891e21 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34069,6 +34069,14 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(VMINMAX_SAE)
NODE_NAME_CASE(VMINMAXS)
NODE_NAME_CASE(VMINMAXS_SAE)
+ NODE_NAME_CASE(CVTP2IBS)
+ NODE_NAME_CASE(CVTP2IUBS)
+ NODE_NAME_CASE(CVTP2IBS_RND)
+ NODE_NAME_CASE(CVTP2IUBS_RND)
+ NODE_NAME_CASE(CVTTP2IBS)
+ NODE_NAME_CASE(CVTTP2IUBS)
+ NODE_NAME_CASE(CVTTP2IBS_SAE)
+ NODE_NAME_CASE(CVTTP2IUBS_SAE)
NODE_NAME_CASE(AESENC128KL)
NODE_NAME_CASE(AESDEC128KL)
NODE_NAME_CASE(AESENC256KL)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index b985f75..2e7538c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -607,6 +607,15 @@ namespace llvm {
VMINMAXS,
VMINMAXS_SAE,
+ CVTP2IBS,
+ CVTP2IUBS,
+ CVTP2IBS_RND,
+ CVTP2IUBS_RND,
+ CVTTP2IBS,
+ CVTTP2IUBS,
+ CVTTP2IBS_SAE,
+ CVTTP2IUBS_SAE,
+
MPSADBW,
// Compress and expand.
diff --git a/llvm/lib/Target/X86/X86InstrAVX10.td b/llvm/lib/Target/X86/X86InstrAVX10.td
index 8e4586f..fe381b3 100644
--- a/llvm/lib/Target/X86/X86InstrAVX10.td
+++ b/llvm/lib/Target/X86/X86InstrAVX10.td
@@ -451,3 +451,176 @@ defm VMINMAXSH : avx10_minmax_scalar<"vminmaxsh", v8f16x_info, X86vminmaxs, X86v
AVX512PSIi8Base, VEX_LIG, EVEX, VVVV, EVEX_CD8<16, CD8VT1>, TA;
defm VMINMAXSS : avx10_minmax_scalar<"vminmaxss", v4f32x_info, X86vminmaxs, X86vminmaxsSae>,
AVX512AIi8Base, VEX_LIG, EVEX, VVVV, EVEX_CD8<32, CD8VT1>;
+
+//-------------------------------------------------
+// AVX10 SATCVT instructions
+//-------------------------------------------------
+
+multiclass avx10_sat_cvt_rmb<bits<8> Opc, string OpStr, X86FoldableSchedWrite sched,
+ X86VectorVTInfo DestInfo,
+ X86VectorVTInfo SrcInfo,
+ SDNode MaskNode> {
+ defm rr: AVX512_maskable<Opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.RC:$src), OpStr, "$src", "$src",
+ (DestInfo.VT (MaskNode (SrcInfo.VT SrcInfo.RC:$src)))>,
+ Sched<[sched]>;
+ defm rm: AVX512_maskable<Opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.MemOp:$src), OpStr, "$src", "$src",
+ (DestInfo.VT (MaskNode (SrcInfo.VT
+ (SrcInfo.LdFrag addr:$src))))>,
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
+ defm rmb: AVX512_maskable<Opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.ScalarMemOp:$src), OpStr,
+ "${src}"#SrcInfo.BroadcastStr, "${src}"#SrcInfo.BroadcastStr,
+ (DestInfo.VT (MaskNode (SrcInfo.VT
+ (SrcInfo.BroadcastLdFrag addr:$src))))>, EVEX_B,
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
+}
+
+// Conversion with rounding control (RC)
+multiclass avx10_sat_cvt_rc<bits<8> Opc, string OpStr, X86SchedWriteWidths sched,
+ AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo,
+ SDNode MaskNode> {
+ let Predicates = [HasAVX10_2_512], Uses = [MXCSR] in
+ defm Zrrb : AVX512_maskable<Opc, MRMSrcReg, DestInfo.info512,
+ (outs DestInfo.info512.RC:$dst),
+ (ins SrcInfo.info512.RC:$src, AVX512RC:$rc),
+ OpStr, "$rc, $src", "$src, $rc",
+ (DestInfo.info512.VT
+ (MaskNode (SrcInfo.info512.VT SrcInfo.info512.RC:$src),
+ (i32 timm:$rc)))>,
+ Sched<[sched.ZMM]>, EVEX, EVEX_RC, EVEX_B;
+ let Predicates = [HasAVX10_2], hasEVEX_U = 1 in {
+ defm Z256rrb : AVX512_maskable<Opc, MRMSrcReg, DestInfo.info256,
+ (outs DestInfo.info256.RC:$dst),
+ (ins SrcInfo.info256.RC:$src, AVX512RC:$rc),
+ OpStr, "$rc, $src", "$src, $rc",
+ (DestInfo.info256.VT
+ (MaskNode (SrcInfo.info256.VT SrcInfo.info256.RC:$src),
+ (i32 timm:$rc)))>,
+ Sched<[sched.YMM]>, EVEX, EVEX_RC, EVEX_B;
+ }
+}
+
+// Conversion with SAE
+multiclass avx10_sat_cvt_sae<bits<8> Opc, string OpStr, X86SchedWriteWidths sched,
+ AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo,
+ SDNode Node> {
+ let Predicates = [HasAVX10_2_512], Uses = [MXCSR] in
+ defm Zrrb : AVX512_maskable<Opc, MRMSrcReg, DestInfo.info512,
+ (outs DestInfo.info512.RC:$dst),
+ (ins SrcInfo.info512.RC:$src),
+ OpStr, "{sae}, $src", "$src, {sae}",
+ (DestInfo.info512.VT
+ (Node (SrcInfo.info512.VT SrcInfo.info512.RC:$src)))>,
+ Sched<[sched.ZMM]>, EVEX, EVEX_B;
+ let Predicates = [HasAVX10_2], hasEVEX_U = 1 in {
+ defm Z256rrb : AVX512_maskable<Opc, MRMSrcReg, DestInfo.info256,
+ (outs DestInfo.info256.RC:$dst),
+ (ins SrcInfo.info256.RC:$src),
+ OpStr, "{sae}, $src", "$src, {sae}",
+ (DestInfo.info256.VT
+ (Node (SrcInfo.info256.VT SrcInfo.info256.RC:$src)))>,
+ Sched<[sched.YMM]>, EVEX, EVEX_B;
+ }
+}
+
+multiclass avx10_sat_cvt_base<bits<8> Opc, string OpStr, X86SchedWriteWidths sched,
+ SDNode MaskNode, AVX512VLVectorVTInfo DestInfo,
+ AVX512VLVectorVTInfo SrcInfo> {
+ let Predicates = [HasAVX10_2_512] in
+ defm Z : avx10_sat_cvt_rmb<Opc, OpStr, sched.ZMM,
+ DestInfo.info512, SrcInfo.info512,
+ MaskNode>,
+ EVEX, EVEX_V512;
+ let Predicates = [HasAVX10_2] in {
+ defm Z256
+ : avx10_sat_cvt_rmb<Opc, OpStr, sched.YMM,
+ DestInfo.info256, SrcInfo.info256,
+ MaskNode>,
+ EVEX, EVEX_V256;
+ defm Z128
+ : avx10_sat_cvt_rmb<Opc, OpStr, sched.XMM,
+ DestInfo.info128, SrcInfo.info128,
+ MaskNode>,
+ EVEX, EVEX_V128;
+ }
+}
+
+defm VCVTNEBF162IBS : avx10_sat_cvt_base<0x69, "vcvtnebf162ibs",
+ SchedWriteVecIMul, X86vcvtp2ibs,
+ avx512vl_i16_info, avx512vl_bf16_info>,
+ AVX512XDIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+defm VCVTNEBF162IUBS : avx10_sat_cvt_base<0x6b, "vcvtnebf162iubs",
+ SchedWriteVecIMul, X86vcvtp2iubs,
+ avx512vl_i16_info, avx512vl_bf16_info>,
+ AVX512XDIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+
+defm VCVTPH2IBS : avx10_sat_cvt_base<0x69, "vcvtph2ibs", SchedWriteVecIMul,
+ X86vcvtp2ibs, avx512vl_i16_info,
+ avx512vl_f16_info>,
+ avx10_sat_cvt_rc<0x69, "vcvtph2ibs", SchedWriteVecIMul,
+ avx512vl_i16_info, avx512vl_f16_info,
+ X86vcvtp2ibsRnd>,
+ AVX512PSIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+defm VCVTPH2IUBS : avx10_sat_cvt_base<0x6b, "vcvtph2iubs", SchedWriteVecIMul,
+ X86vcvtp2iubs, avx512vl_i16_info,
+ avx512vl_f16_info>,
+ avx10_sat_cvt_rc<0x6b, "vcvtph2iubs", SchedWriteVecIMul,
+ avx512vl_i16_info, avx512vl_f16_info,
+ X86vcvtp2iubsRnd>,
+ AVX512PSIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+
+defm VCVTPS2IBS : avx10_sat_cvt_base<0x69, "vcvtps2ibs", SchedWriteVecIMul,
+ X86vcvtp2ibs, avx512vl_i32_info,
+ avx512vl_f32_info>,
+ avx10_sat_cvt_rc<0x69, "vcvtps2ibs", SchedWriteVecIMul,
+ avx512vl_i32_info, avx512vl_f32_info,
+ X86vcvtp2ibsRnd>,
+ AVX512PDIi8Base, T_MAP5, EVEX_CD8<32, CD8VF>;
+defm VCVTPS2IUBS : avx10_sat_cvt_base<0x6b, "vcvtps2iubs", SchedWriteVecIMul,
+ X86vcvtp2iubs, avx512vl_i32_info,
+ avx512vl_f32_info>,
+ avx10_sat_cvt_rc<0x6b, "vcvtps2iubs", SchedWriteVecIMul,
+ avx512vl_i32_info, avx512vl_f32_info,
+ X86vcvtp2iubsRnd>,
+ AVX512PDIi8Base, T_MAP5, EVEX_CD8<32, CD8VF>;
+
+defm VCVTTNEBF162IBS : avx10_sat_cvt_base<0x68, "vcvttnebf162ibs",
+ SchedWriteVecIMul, X86vcvttp2ibs,
+ avx512vl_i16_info, avx512vl_bf16_info>,
+ AVX512XDIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+defm VCVTTNEBF162IUBS : avx10_sat_cvt_base<0x6a, "vcvttnebf162iubs",
+ SchedWriteVecIMul, X86vcvttp2iubs,
+ avx512vl_i16_info, avx512vl_bf16_info>,
+ AVX512XDIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+
+defm VCVTTPH2IBS : avx10_sat_cvt_base<0x68, "vcvttph2ibs", SchedWriteVecIMul,
+ X86vcvttp2ibs, avx512vl_i16_info,
+ avx512vl_f16_info>,
+ avx10_sat_cvt_sae<0x68, "vcvttph2ibs", SchedWriteVecIMul,
+ avx512vl_i16_info, avx512vl_f16_info,
+ X86vcvttp2ibsSAE>,
+ AVX512PSIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+defm VCVTTPH2IUBS : avx10_sat_cvt_base<0x6a, "vcvttph2iubs", SchedWriteVecIMul,
+ X86vcvttp2iubs, avx512vl_i16_info,
+ avx512vl_f16_info>,
+ avx10_sat_cvt_sae<0x6a, "vcvttph2iubs", SchedWriteVecIMul,
+ avx512vl_i16_info, avx512vl_f16_info,
+ X86vcvttp2iubsSAE>,
+ AVX512PSIi8Base, T_MAP5, EVEX_CD8<16, CD8VF>;
+
+defm VCVTTPS2IBS : avx10_sat_cvt_base<0x68, "vcvttps2ibs", SchedWriteVecIMul,
+ X86vcvttp2ibs, avx512vl_i32_info,
+ avx512vl_f32_info>,
+ avx10_sat_cvt_sae<0x68, "vcvttps2ibs", SchedWriteVecIMul,
+ avx512vl_i32_info, avx512vl_f32_info,
+ X86vcvttp2ibsSAE>,
+ AVX512PDIi8Base, T_MAP5, EVEX_CD8<32, CD8VF>;
+defm VCVTTPS2IUBS : avx10_sat_cvt_base<0x6a, "vcvttps2iubs", SchedWriteVecIMul,
+ X86vcvttp2iubs, avx512vl_i32_info,
+ avx512vl_f32_info>,
+ avx10_sat_cvt_sae<0x6a, "vcvttps2iubs", SchedWriteVecIMul,
+ avx512vl_i32_info, avx512vl_f32_info,
+ X86vcvttp2iubsSAE>,
+ AVX512PDIi8Base, T_MAP5, EVEX_CD8<32, CD8VF>;
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 78c76ca..6db1cf7 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -833,6 +833,20 @@ def X86vpdpwuuds : SDNode<"X86ISD::VPDPWUUDS", SDTVnni>;
def X86Vmpsadbw : SDNode<"X86ISD::MPSADBW", SDTX86PSADBW>;
+// In-place saturated fp-to-int conversion nodes
+def X86vcvtp2ibs : SDNode<"X86ISD::CVTP2IBS", SDTFloatToInt>;
+def X86vcvtp2iubs : SDNode<"X86ISD::CVTP2IUBS", SDTFloatToInt>;
+
+def X86vcvtp2ibsRnd : SDNode<"X86ISD::CVTP2IBS_RND", SDTFloatToIntRnd>;
+def X86vcvtp2iubsRnd : SDNode<"X86ISD::CVTP2IUBS_RND", SDTFloatToIntRnd>;
+
+// In-place saturated truncating fp-to-int conversion nodes
+def X86vcvttp2ibs : SDNode<"X86ISD::CVTTP2IBS", SDTFloatToInt>;
+def X86vcvttp2iubs : SDNode<"X86ISD::CVTTP2IUBS", SDTFloatToInt>;
+
+def X86vcvttp2ibsSAE : SDNode<"X86ISD::CVTTP2IBS_SAE", SDTFloatToInt>;
+def X86vcvttp2iubsSAE : SDNode<"X86ISD::CVTTP2IUBS_SAE", SDTFloatToInt>;
+
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 8387b76..208af63 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -313,7 +313,7 @@ def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info : X86VectorVTInfo<8, i64, VR512, "q">;
def v32f16_info : X86VectorVTInfo<32, f16, VR512, "ph">;
-def v32bf16_info: X86VectorVTInfo<32, bf16, VR512, "pbf">;
+def v32bf16_info: X86VectorVTInfo<32, bf16, VR512, "pbh">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info : X86VectorVTInfo<8, f64, VR512, "pd">;
@@ -323,7 +323,7 @@ def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info : X86VectorVTInfo<8, i32, VR256X, "d">;
def v4i64x_info : X86VectorVTInfo<4, i64, VR256X, "q">;
def v16f16x_info : X86VectorVTInfo<16, f16, VR256X, "ph">;
-def v16bf16x_info: X86VectorVTInfo<16, bf16, VR256X, "pbf">;
+def v16bf16x_info: X86VectorVTInfo<16, bf16, VR256X, "pbh">;
def v8f32x_info : X86VectorVTInfo<8, f32, VR256X, "ps">;
def v4f64x_info : X86VectorVTInfo<4, f64, VR256X, "pd">;
@@ -332,7 +332,7 @@ def v8i16x_info : X86VectorVTInfo<8, i16, VR128X, "w">;
def v4i32x_info : X86VectorVTInfo<4, i32, VR128X, "d">;
def v2i64x_info : X86VectorVTInfo<2, i64, VR128X, "q">;
def v8f16x_info : X86VectorVTInfo<8, f16, VR128X, "ph">;
-def v8bf16x_info : X86VectorVTInfo<8, bf16, VR128X, "pbf">;
+def v8bf16x_info : X86VectorVTInfo<8, bf16, VR128X, "pbh">;
def v4f32x_info : X86VectorVTInfo<4, f32, VR128X, "ps">;
def v2f64x_info : X86VectorVTInfo<2, f64, VR128X, "pd">;
diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
index 536391d..47be08c 100644
--- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
@@ -408,6 +408,18 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::CVTP2UI, X86ISD::CVTP2UI_RND),
X86_INTRINSIC_DATA(avx10_mask_vcvtph2dq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2ibs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2ibs256, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, X86ISD::CVTP2IBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2ibs512, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, X86ISD::CVTP2IBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2iubs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2iubs256, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, X86ISD::CVTP2IUBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtph2iubs512, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, X86ISD::CVTP2IUBS_RND),
X86_INTRINSIC_DATA(avx10_mask_vcvtph2pd256, INTR_TYPE_1OP_MASK_SAE,
ISD::FP_EXTEND, X86ISD::VFPEXT_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvtph2psx256, INTR_TYPE_1OP_MASK_SAE,
@@ -424,6 +436,18 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
X86_INTRINSIC_DATA(avx10_mask_vcvtps2dq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTP2SI, X86ISD::CVTP2SI_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2ibs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2ibs256, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, X86ISD::CVTP2IBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2ibs512, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IBS, X86ISD::CVTP2IBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2iubs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2iubs256, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, X86ISD::CVTP2IUBS_RND),
+ X86_INTRINSIC_DATA(avx10_mask_vcvtps2iubs512, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTP2IUBS, X86ISD::CVTP2IUBS_RND),
X86_INTRINSIC_DATA(avx10_mask_vcvtps2pd256, INTR_TYPE_1OP_MASK_SAE,
ISD::FP_EXTEND, X86ISD::VFPEXT_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvtps2phx256, INTR_TYPE_1OP_MASK,
@@ -444,6 +468,18 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::CVTTP2UI, X86ISD::CVTTP2UI_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttph2dq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2ibs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2ibs256, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IBS, X86ISD::CVTTP2IBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2ibs512, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IBS, X86ISD::CVTTP2IBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2iubs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2iubs256, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IUBS, X86ISD::CVTTP2IUBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttph2iubs512, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IUBS, X86ISD::CVTTP2IUBS_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttph2qq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttph2udq256, INTR_TYPE_1OP_MASK,
@@ -456,6 +492,18 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttps2dq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2ibs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2ibs256, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTTP2IBS, X86ISD::CVTTP2IBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2ibs512, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IBS, X86ISD::CVTTP2IBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2iubs128, INTR_TYPE_1OP_MASK,
+ X86ISD::CVTTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2iubs256, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IUBS, X86ISD::CVTTP2IUBS_SAE),
+ X86_INTRINSIC_DATA(avx10_mask_vcvttps2iubs512, INTR_TYPE_1OP_MASK_SAE,
+ X86ISD::CVTTP2IUBS, X86ISD::CVTTP2IUBS_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttps2qq256, INTR_TYPE_1OP_MASK,
X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE),
X86_INTRINSIC_DATA(avx10_mask_vcvttps2udq256, INTR_TYPE_1OP_MASK,
@@ -546,6 +594,30 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FADD_RND),
X86_INTRINSIC_DATA(avx10_vaddps256, INTR_TYPE_2OP, ISD::FADD,
X86ISD::FADD_RND),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162ibs128, INTR_TYPE_1OP, X86ISD::CVTP2IBS,
+ 0),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162ibs256, INTR_TYPE_1OP, X86ISD::CVTP2IBS,
+ 0),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162ibs512, INTR_TYPE_1OP, X86ISD::CVTP2IBS,
+ 0),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162iubs128, INTR_TYPE_1OP,
+ X86ISD::CVTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162iubs256, INTR_TYPE_1OP,
+ X86ISD::CVTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvtnebf162iubs512, INTR_TYPE_1OP,
+ X86ISD::CVTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162ibs128, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162ibs256, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162ibs512, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162iubs128, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162iubs256, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IUBS, 0),
+ X86_INTRINSIC_DATA(avx10_vcvttnebf162iubs512, INTR_TYPE_1OP,
+ X86ISD::CVTTP2IUBS, 0),
X86_INTRINSIC_DATA(avx10_vdivpd256, INTR_TYPE_2OP, ISD::FDIV,
X86ISD::FDIV_RND),
X86_INTRINSIC_DATA(avx10_vdivph256, INTR_TYPE_2OP, ISD::FDIV,
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 555ede9..3376367 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -45,6 +45,12 @@ static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
+static cl::opt<bool>
+ DisableRegAllocNDDHints("x86-disable-regalloc-hints-for-ndd", cl::Hidden,
+ cl::init(false),
+ cl::desc("Disable two address hints for register "
+ "allocation"));
+
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
: X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(TT, false),
@@ -1080,10 +1086,57 @@ bool X86RegisterInfo::getRegAllocationHints(Register VirtReg,
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
VirtReg, Order, Hints, MF, VRM, Matrix);
+ const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
+ const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
unsigned ID = RC.getID();
- if (ID != X86::TILERegClassID)
+
+ if (!VRM)
+ return BaseImplRetVal;
+
+ if (ID != X86::TILERegClassID) {
+ if (DisableRegAllocNDDHints || !ST.hasNDD() ||
+ !TRI.isGeneralPurposeRegisterClass(&RC))
+ return BaseImplRetVal;
+
+ // Add any two address hints after any copy hints.
+ SmallSet<unsigned, 4> TwoAddrHints;
+
+ auto TryAddNDDHint = [&](const MachineOperand &MO) {
+ Register Reg = MO.getReg();
+ Register PhysReg =
+ Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
+ if (PhysReg && !MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
+ TwoAddrHints.insert(PhysReg);
+ };
+
+    // An NDD instruction is compressible when Op0 is allocated to the same
+    // physical register as Op1 (or as Op2 if the instruction is commutable).
+ for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
+ const MachineInstr &MI = *MO.getParent();
+ if (!X86::getNonNDVariant(MI.getOpcode()))
+ continue;
+ unsigned OpIdx = MI.getOperandNo(&MO);
+ if (OpIdx == 0) {
+ assert(MI.getOperand(1).isReg());
+ TryAddNDDHint(MI.getOperand(1));
+ if (MI.isCommutable()) {
+ assert(MI.getOperand(2).isReg());
+ TryAddNDDHint(MI.getOperand(2));
+ }
+ } else if (OpIdx == 1) {
+ TryAddNDDHint(MI.getOperand(0));
+ } else if (MI.isCommutable() && OpIdx == 2) {
+ TryAddNDDHint(MI.getOperand(0));
+ }
+ }
+
+ for (MCPhysReg OrderReg : Order)
+ if (TwoAddrHints.count(OrderReg))
+ Hints.push_back(OrderReg);
+
return BaseImplRetVal;
+ }
ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
auto AddHint = [&](MCPhysReg PhysReg) {
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
index bf89aac..55911a7 100644
--- a/llvm/lib/TargetParser/Triple.cpp
+++ b/llvm/lib/TargetParser/Triple.cpp
@@ -44,8 +44,6 @@ StringRef Triple::getArchTypeName(ArchType Kind) {
case hsail: return "hsail";
case kalimba: return "kalimba";
case lanai: return "lanai";
- case le32: return "le32";
- case le64: return "le64";
case loongarch32: return "loongarch32";
case loongarch64: return "loongarch64";
case m68k: return "m68k";
@@ -199,9 +197,6 @@ StringRef Triple::getArchTypePrefix(ArchType Kind) {
case nvptx: return "nvvm";
case nvptx64: return "nvvm";
- case le32: return "le32";
- case le64: return "le64";
-
case amdil:
case amdil64: return "amdil";
@@ -434,8 +429,6 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("xcore", xcore)
.Case("nvptx", nvptx)
.Case("nvptx64", nvptx64)
- .Case("le32", le32)
- .Case("le64", le64)
.Case("amdil", amdil)
.Case("amdil64", amdil64)
.Case("hsail", hsail)
@@ -576,8 +569,6 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("xcore", Triple::xcore)
.Case("nvptx", Triple::nvptx)
.Case("nvptx64", Triple::nvptx64)
- .Case("le32", Triple::le32)
- .Case("le64", Triple::le64)
.Case("amdil", Triple::amdil)
.Case("amdil64", Triple::amdil64)
.Case("hsail", Triple::hsail)
@@ -908,8 +899,6 @@ static Triple::ObjectFormatType getDefaultFormat(const Triple &T) {
case Triple::hsail:
case Triple::kalimba:
case Triple::lanai:
- case Triple::le32:
- case Triple::le64:
case Triple::loongarch32:
case Triple::loongarch64:
case Triple::m68k:
@@ -1606,7 +1595,6 @@ unsigned Triple::getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::hsail:
case llvm::Triple::kalimba:
case llvm::Triple::lanai:
- case llvm::Triple::le32:
case llvm::Triple::loongarch32:
case llvm::Triple::m68k:
case llvm::Triple::mips:
@@ -1639,7 +1627,6 @@ unsigned Triple::getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
case llvm::Triple::hsail64:
- case llvm::Triple::le64:
case llvm::Triple::loongarch64:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
@@ -1698,7 +1685,6 @@ Triple Triple::get32BitArchVariant() const {
case Triple::hsail:
case Triple::kalimba:
case Triple::lanai:
- case Triple::le32:
case Triple::loongarch32:
case Triple::m68k:
case Triple::mips:
@@ -1729,7 +1715,6 @@ Triple Triple::get32BitArchVariant() const {
case Triple::aarch64_be: T.setArch(Triple::armeb); break;
case Triple::amdil64: T.setArch(Triple::amdil); break;
case Triple::hsail64: T.setArch(Triple::hsail); break;
- case Triple::le64: T.setArch(Triple::le32); break;
case Triple::loongarch64: T.setArch(Triple::loongarch32); break;
case Triple::mips64:
T.setArch(Triple::mips, getSubArch());
@@ -1784,7 +1769,6 @@ Triple Triple::get64BitArchVariant() const {
case Triple::bpfeb:
case Triple::bpfel:
case Triple::hsail64:
- case Triple::le64:
case Triple::loongarch64:
case Triple::mips64:
case Triple::mips64el:
@@ -1808,7 +1792,6 @@ Triple Triple::get64BitArchVariant() const {
case Triple::arm: T.setArch(Triple::aarch64); break;
case Triple::armeb: T.setArch(Triple::aarch64_be); break;
case Triple::hsail: T.setArch(Triple::hsail64); break;
- case Triple::le32: T.setArch(Triple::le64); break;
case Triple::loongarch32: T.setArch(Triple::loongarch64); break;
case Triple::mips:
T.setArch(Triple::mips64, getSubArch());
@@ -1851,8 +1834,6 @@ Triple Triple::getBigEndianArchVariant() const {
case Triple::hsail64:
case Triple::hsail:
case Triple::kalimba:
- case Triple::le32:
- case Triple::le64:
case Triple::loongarch32:
case Triple::loongarch64:
case Triple::msp430:
@@ -1956,8 +1937,6 @@ bool Triple::isLittleEndian() const {
case Triple::hsail64:
case Triple::hsail:
case Triple::kalimba:
- case Triple::le32:
- case Triple::le64:
case Triple::loongarch32:
case Triple::loongarch64:
case Triple::mips64el:
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index cd31c4b..db5e948 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -1325,20 +1325,20 @@ struct AAPointerInfoImpl
const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
-
- // Without going backwards in the call tree, can we reach the access
- // from the least dominating write. Do not allow to pass the instruction
- // itself either.
- bool Inserted = ExclusionSet.insert(&I).second;
-
- if (!FnReachabilityAA ||
- !FnReachabilityAA->instructionCanReach(
- A, *LeastDominatingWriteInst,
- *Acc.getRemoteInst()->getFunction(), &ExclusionSet))
- WriteChecked = true;
-
- if (Inserted)
- ExclusionSet.erase(&I);
+ if (FnReachabilityAA) {
+ // Without going backwards in the call tree, can we reach the access
+ // from the least dominating write. Do not allow to pass the
+ // instruction itself either.
+ bool Inserted = ExclusionSet.insert(&I).second;
+
+ if (!FnReachabilityAA->instructionCanReach(
+ A, *LeastDominatingWriteInst,
+ *Acc.getRemoteInst()->getFunction(), &ExclusionSet))
+ WriteChecked = true;
+
+ if (Inserted)
+ ExclusionSet.erase(&I);
+ }
}
if (ReadChecked && WriteChecked)
@@ -11874,14 +11874,24 @@ struct AAUnderlyingObjectsImpl
/// See AbstractAttribute::getAsStr().
const std::string getAsStr(Attributor *A) const override {
- return std::string("UnderlyingObjects ") +
- (isValidState()
- ? (std::string("inter #") +
- std::to_string(InterAssumedUnderlyingObjects.size()) +
- " objs" + std::string(", intra #") +
- std::to_string(IntraAssumedUnderlyingObjects.size()) +
- " objs")
- : "<invalid>");
+ if (!isValidState())
+ return "<invalid>";
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ OS << "underlying objects: inter " << InterAssumedUnderlyingObjects.size()
+ << " objects, intra " << IntraAssumedUnderlyingObjects.size()
+ << " objects.\n";
+ if (!InterAssumedUnderlyingObjects.empty()) {
+ OS << "inter objects:\n";
+ for (auto *Obj : InterAssumedUnderlyingObjects)
+ OS << *Obj << '\n';
+ }
+ if (!IntraAssumedUnderlyingObjects.empty()) {
+ OS << "intra objects:\n";
+ for (auto *Obj : IntraAssumedUnderlyingObjects)
+ OS << *Obj << '\n';
+ }
+ return Str;
}
/// See AbstractAttribute::trackStatistics()
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index a22ee1d..6025e73 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -155,42 +155,41 @@ static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
} else {
return nullptr;
}
+ if (Pred == ICmpInst::ICMP_NE)
+ std::swap(SelTC, SelFC);
// In general, when both constants are non-zero, we would need an offset to
// replace the select. This would require more instructions than we started
// with. But there's one special-case that we handle here because it can
// simplify/reduce the instructions.
- APInt TC = *SelTC;
- APInt FC = *SelFC;
+ const APInt &TC = *SelTC;
+ const APInt &FC = *SelFC;
if (!TC.isZero() && !FC.isZero()) {
- // If the select constants differ by exactly one bit and that's the same
- // bit that is masked and checked by the select condition, the select can
- // be replaced by bitwise logic to set/clear one bit of the constant result.
- if (TC.getBitWidth() != AndMask.getBitWidth() || (TC ^ FC) != AndMask)
+ if (TC.getBitWidth() != AndMask.getBitWidth())
return nullptr;
- if (CreateAnd) {
- // If we have to create an 'and', then we must kill the cmp to not
- // increase the instruction count.
- if (!Cmp->hasOneUse())
- return nullptr;
- V = Builder.CreateAnd(V, ConstantInt::get(SelType, AndMask));
- }
- bool ExtraBitInTC = TC.ugt(FC);
- if (Pred == ICmpInst::ICMP_EQ) {
- // If the masked bit in V is clear, clear or set the bit in the result:
- // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) ^ TC
- // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) | TC
- Constant *C = ConstantInt::get(SelType, TC);
- return ExtraBitInTC ? Builder.CreateXor(V, C) : Builder.CreateOr(V, C);
- }
- if (Pred == ICmpInst::ICMP_NE) {
- // If the masked bit in V is set, set or clear the bit in the result:
- // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) | FC
- // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) ^ FC
- Constant *C = ConstantInt::get(SelType, FC);
- return ExtraBitInTC ? Builder.CreateOr(V, C) : Builder.CreateXor(V, C);
+ // If we have to create an 'and', then we must kill the cmp to not
+ // increase the instruction count.
+ if (CreateAnd && !Cmp->hasOneUse())
+ return nullptr;
+
+ // (V & AndMaskC) == 0 ? TC : FC --> TC | (V & AndMaskC)
+ // (V & AndMaskC) == 0 ? TC : FC --> TC ^ (V & AndMaskC)
+ // (V & AndMaskC) == 0 ? TC : FC --> TC + (V & AndMaskC)
+ // (V & AndMaskC) == 0 ? TC : FC --> TC - (V & AndMaskC)
+ Constant *TCC = ConstantInt::get(SelType, TC);
+ Constant *FCC = ConstantInt::get(SelType, FC);
+ Constant *MaskC = ConstantInt::get(SelType, AndMask);
+ for (auto Opc : {Instruction::Or, Instruction::Xor, Instruction::Add,
+ Instruction::Sub}) {
+ if (ConstantFoldBinaryOpOperands(Opc, TCC, MaskC, Sel.getDataLayout()) ==
+ FCC) {
+ if (CreateAnd)
+ V = Builder.CreateAnd(V, MaskC);
+ return Builder.CreateBinOp(Opc, TCC, V);
+ }
}
- llvm_unreachable("Only expecting equality predicates");
+
+ return nullptr;
}
// Make sure one of the select arms is a power-of-2.
@@ -203,7 +202,6 @@ static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
unsigned ValZeros = ValC.logBase2();
unsigned AndZeros = AndMask.logBase2();
bool ShouldNotVal = !TC.isZero();
- ShouldNotVal ^= Pred == ICmpInst::ICMP_NE;
// If we would need to create an 'and' + 'shift' + 'xor' to replace a 'select'
// + 'icmp', then this transformation would result in more instructions and
@@ -899,7 +897,6 @@ static Instruction *foldSelectZeroOrMul(SelectInst &SI, InstCombinerImpl &IC) {
/// Transform patterns such as (a > b) ? a - b : 0 into usub.sat(a, b).
/// There are 8 commuted/swapped variants of this pattern.
-/// TODO: Also support a - UMIN(a,b) patterns.
static Value *canonicalizeSaturatedSubtract(const ICmpInst *ICI,
const Value *TrueVal,
const Value *FalseVal,
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 329b3ef..d48b128 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2806,7 +2806,8 @@ static bool hoistBOAssociation(Instruction &I, Loop &L,
return false;
auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(0));
- if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative())
+ if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
+ BO0->hasNUsesOrMore(3))
return false;
// Transform: "(LV op C1) op C2" ==> "LV op (C1 op C2)"
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 7192efe..efb02fd 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1726,7 +1726,7 @@ void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
// For now, when there is a store to parts of the variable (but we do not
// know which part) we insert an dbg.value intrinsic to indicate that we
// know nothing about the variable's content.
- DV = UndefValue::get(DV->getType());
+ DV = PoisonValue::get(DV->getType());
insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
SI->getIterator());
}
@@ -1798,7 +1798,7 @@ void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
// For now, when there is a store to parts of the variable (but we do not
// know which part) we insert an dbg.value intrinsic to indicate that we
// know nothing about the variable's content.
- DV = UndefValue::get(DV->getType());
+ DV = PoisonValue::get(DV->getType());
ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
DbgVariableRecord *NewDVR =
new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index ed9dfa6..1dc291e 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -253,6 +253,21 @@ static FixedVectorType *getWidenedType(Type *ScalarTy, unsigned VF) {
VF * getNumElements(ScalarTy));
}
+static void transformScalarShuffleIndiciesToVector(unsigned VecTyNumElements,
+ SmallVectorImpl<int> &Mask) {
+ // The ShuffleBuilder implementation use shufflevector to splat an "element".
+ // But the element have different meaning for SLP (scalar) and REVEC
+ // (vector). We need to expand Mask into masks which shufflevector can use
+ // directly.
+ SmallVector<int> NewMask(Mask.size() * VecTyNumElements);
+ for (unsigned I : seq<unsigned>(Mask.size()))
+ for (auto [J, MaskV] : enumerate(MutableArrayRef(NewMask).slice(
+ I * VecTyNumElements, VecTyNumElements)))
+ MaskV = Mask[I] == PoisonMaskElem ? PoisonMaskElem
+ : Mask[I] * VecTyNumElements + J;
+ Mask.swap(NewMask);
+}
+
/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
@@ -4617,7 +4632,17 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
// 3. The loads are ordered, or number of unordered loads <=
// MaxProfitableUnorderedLoads, or loads are in reversed order.
// (this check is to avoid extra costs for very expensive shuffles).
- if (IsPossibleStrided && (((Sz > MinProfitableStridedLoads ||
+ // 4. Any pointer operand is an instruction with the users outside of the
+ // current graph (for masked gathers extra extractelement instructions
+ // might be required).
+ auto IsAnyPointerUsedOutGraph =
+ IsPossibleStrided && any_of(PointerOps, [&](Value *V) {
+ return isa<Instruction>(V) && any_of(V->users(), [&](User *U) {
+ return !getTreeEntry(U) && !MustGather.contains(U);
+ });
+ });
+ if (IsPossibleStrided && (IsAnyPointerUsedOutGraph ||
+ ((Sz > MinProfitableStridedLoads ||
(static_cast<unsigned>(std::abs(*Diff)) <=
MaxProfitableLoadStride * Sz &&
isPowerOf2_32(std::abs(*Diff)))) &&
@@ -6468,6 +6493,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
+ case Instruction::Freeze:
return TreeEntry::Vectorize;
case Instruction::GetElementPtr: {
// We don't combine GEPs with complicated (nested) indexing.
@@ -7305,7 +7331,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
- case Instruction::Xor: {
+ case Instruction::Xor:
+ case Instruction::Freeze: {
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
@@ -7762,6 +7789,31 @@ namespace {
/// The base class for shuffle instruction emission and shuffle cost estimation.
class BaseShuffleAnalysis {
protected:
+ Type *ScalarTy = nullptr;
+
+ BaseShuffleAnalysis(Type *ScalarTy) : ScalarTy(ScalarTy) {}
+
+ /// V is expected to be a vectorized value.
+ /// When REVEC is disabled, there is no difference between VF and
+ /// VNumElements.
+ /// When REVEC is enabled, VF is VNumElements / ScalarTyNumElements.
+ /// e.g., if ScalarTy is <4 x Ty> and V1 is <8 x Ty>, 2 is returned instead
+ /// of 8.
+ unsigned getVF(Value *V) const {
+ assert(V && "V cannot be nullptr");
+ assert(isa<FixedVectorType>(V->getType()) &&
+ "V does not have FixedVectorType");
+ assert(ScalarTy && "ScalarTy cannot be nullptr");
+ unsigned ScalarTyNumElements = getNumElements(ScalarTy);
+ unsigned VNumElements =
+ cast<FixedVectorType>(V->getType())->getNumElements();
+ assert(VNumElements > ScalarTyNumElements &&
+ "the number of elements of V is not large enough");
+ assert(VNumElements % ScalarTyNumElements == 0 &&
+ "the number of elements of V is not a vectorized value");
+ return VNumElements / ScalarTyNumElements;
+ }
+
/// Checks if the mask is an identity mask.
/// \param IsStrict if is true the function returns false if mask size does
/// not match vector size.
@@ -8255,7 +8307,6 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
bool IsFinalized = false;
SmallVector<int> CommonMask;
SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors;
- Type *ScalarTy = nullptr;
const TargetTransformInfo &TTI;
InstructionCost Cost = 0;
SmallDenseSet<Value *> VectorizedVals;
@@ -8837,14 +8888,14 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
} else if (V1 && P2.isNull()) {
// Shuffle single vector.
ExtraCost += GetValueMinBWAffectedCost(V1);
- CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements();
+ CommonVF = getVF(V1);
assert(
all_of(Mask,
[=](int Idx) { return Idx < static_cast<int>(CommonVF); }) &&
"All elements in mask must be less than CommonVF.");
} else if (V1 && !V2) {
// Shuffle vector and tree node.
- unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements();
+ unsigned VF = getVF(V1);
const TreeEntry *E2 = P2.get<const TreeEntry *>();
CommonVF = std::max(VF, E2->getVectorFactor());
assert(all_of(Mask,
@@ -8870,7 +8921,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
} else if (!V1 && V2) {
// Shuffle vector and tree node.
- unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements();
+ unsigned VF = getVF(V2);
const TreeEntry *E1 = P1.get<const TreeEntry *>();
CommonVF = std::max(VF, E1->getVectorFactor());
assert(all_of(Mask,
@@ -8898,9 +8949,8 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
} else {
assert(V1 && V2 && "Expected both vectors.");
- unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements();
- CommonVF =
- std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements());
+ unsigned VF = getVF(V1);
+ CommonVF = std::max(VF, getVF(V2));
assert(all_of(Mask,
[=](int Idx) {
return Idx < 2 * static_cast<int>(CommonVF);
@@ -8918,6 +8968,11 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
}
}
+ if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
+ assert(SLPReVec && "FixedVectorType is not expected.");
+ transformScalarShuffleIndiciesToVector(VecTy->getNumElements(),
+ CommonMask);
+ }
InVectors.front() =
Constant::getNullValue(getWidenedType(ScalarTy, CommonMask.size()));
if (InVectors.size() == 2)
@@ -8930,7 +8985,7 @@ public:
ShuffleCostEstimator(Type *ScalarTy, TargetTransformInfo &TTI,
ArrayRef<Value *> VectorizedVals, BoUpSLP &R,
SmallPtrSetImpl<Value *> &CheckedExtracts)
- : ScalarTy(ScalarTy), TTI(TTI),
+ : BaseShuffleAnalysis(ScalarTy), TTI(TTI),
VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), R(R),
CheckedExtracts(CheckedExtracts) {}
Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
@@ -9135,7 +9190,7 @@ public:
}
assert(!InVectors.empty() && !CommonMask.empty() &&
"Expected only tree entries from extracts/reused buildvectors.");
- unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements();
+ unsigned VF = getVF(V1);
if (InVectors.size() == 2) {
Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
transformMaskAfterShuffle(CommonMask, CommonMask);
@@ -9169,12 +9224,32 @@ public:
}
Vals.push_back(Constant::getNullValue(V->getType()));
}
+ if (auto *VecTy = dyn_cast<FixedVectorType>(Vals.front()->getType())) {
+ assert(SLPReVec && "FixedVectorType is not expected.");
+ // When REVEC is enabled, we need to expand vector types into scalar
+ // types.
+ unsigned VecTyNumElements = VecTy->getNumElements();
+ SmallVector<Constant *> NewVals(VF * VecTyNumElements, nullptr);
+ for (auto [I, V] : enumerate(Vals)) {
+ Type *ScalarTy = V->getType()->getScalarType();
+ Constant *NewVal;
+ if (isa<PoisonValue>(V))
+ NewVal = PoisonValue::get(ScalarTy);
+ else if (isa<UndefValue>(V))
+ NewVal = UndefValue::get(ScalarTy);
+ else
+ NewVal = Constant::getNullValue(ScalarTy);
+ std::fill_n(NewVals.begin() + I * VecTyNumElements, VecTyNumElements,
+ NewVal);
+ }
+ Vals.swap(NewVals);
+ }
return ConstantVector::get(Vals);
}
return ConstantVector::getSplat(
ElementCount::getFixed(
cast<FixedVectorType>(Root->getType())->getNumElements()),
- getAllOnesValue(*R.DL, ScalarTy));
+ getAllOnesValue(*R.DL, ScalarTy->getScalarType()));
}
InstructionCost createFreeze(InstructionCost Cost) { return Cost; }
/// Finalize emission of the shuffles.
@@ -10045,6 +10120,8 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
};
return GetCostDiff(GetScalarCost, GetVectorCost);
}
+ case Instruction::Freeze:
+ return CommonCost;
default:
llvm_unreachable("Unknown instruction");
}
@@ -11675,8 +11752,8 @@ Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root, Type *ScalarTy) {
Type *Ty) {
Value *Scalar = V;
if (Scalar->getType() != Ty) {
- assert(Scalar->getType()->isIntegerTy() && Ty->isIntegerTy() &&
- "Expected integer types only.");
+ assert(Scalar->getType()->isIntOrIntVectorTy() &&
+ Ty->isIntOrIntVectorTy() && "Expected integer types only.");
Value *V = Scalar;
if (auto *CI = dyn_cast<CastInst>(Scalar);
isa_and_nonnull<SExtInst, ZExtInst>(CI)) {
@@ -11689,10 +11766,21 @@ Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root, Type *ScalarTy) {
V, Ty, !isKnownNonNegative(Scalar, SimplifyQuery(*DL)));
}
- Vec = Builder.CreateInsertElement(Vec, Scalar, Builder.getInt32(Pos));
- auto *InsElt = dyn_cast<InsertElementInst>(Vec);
- if (!InsElt)
- return Vec;
+ Instruction *InsElt;
+ if (auto *VecTy = dyn_cast<FixedVectorType>(Scalar->getType())) {
+ assert(SLPReVec && "FixedVectorType is not expected.");
+ Vec = InsElt = Builder.CreateInsertVector(
+ Vec->getType(), Vec, V,
+ Builder.getInt64(Pos * VecTy->getNumElements()));
+ auto *II = dyn_cast<IntrinsicInst>(InsElt);
+ if (!II || II->getIntrinsicID() != Intrinsic::vector_insert)
+ return Vec;
+ } else {
+ Vec = Builder.CreateInsertElement(Vec, Scalar, Builder.getInt32(Pos));
+ InsElt = dyn_cast<InsertElementInst>(Vec);
+ if (!InsElt)
+ return Vec;
+ }
GatherShuffleExtractSeq.insert(InsElt);
CSEBlocks.insert(InsElt->getParent());
// Add to our 'need-to-extract' list.
@@ -11793,7 +11881,6 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
/// resulting shuffle and the second operand sets to be the newly added
/// operand. The \p CommonMask is transformed in the proper way after that.
SmallVector<Value *, 2> InVectors;
- Type *ScalarTy = nullptr;
IRBuilderBase &Builder;
BoUpSLP &R;
@@ -11919,7 +12006,7 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
public:
ShuffleInstructionBuilder(Type *ScalarTy, IRBuilderBase &Builder, BoUpSLP &R)
- : ScalarTy(ScalarTy), Builder(Builder), R(R) {}
+ : BaseShuffleAnalysis(ScalarTy), Builder(Builder), R(R) {}
/// Adjusts extractelements after reusing them.
Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
@@ -12176,7 +12263,7 @@ public:
break;
}
}
- int VF = cast<FixedVectorType>(V1->getType())->getNumElements();
+ int VF = getVF(V1);
for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF);
@@ -12199,6 +12286,15 @@ public:
finalize(ArrayRef<int> ExtMask, unsigned VF = 0,
function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
IsFinalized = true;
+ SmallVector<int> NewExtMask(ExtMask);
+ if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
+ assert(SLPReVec && "FixedVectorType is not expected.");
+ transformScalarShuffleIndiciesToVector(VecTy->getNumElements(),
+ CommonMask);
+ transformScalarShuffleIndiciesToVector(VecTy->getNumElements(),
+ NewExtMask);
+ ExtMask = NewExtMask;
+ }
if (Action) {
Value *Vec = InVectors.front();
if (InVectors.size() == 2) {
@@ -13298,6 +13394,24 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
return V;
}
+ case Instruction::Freeze: {
+ setInsertPointAfterBundle(E);
+
+ Value *Op = vectorizeOperand(E, 0, PostponedPHIs);
+
+ if (E->VectorizedValue) {
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ return E->VectorizedValue;
+ }
+
+ Value *V = Builder.CreateFreeze(Op);
+ V = FinalShuffle(V, E, VecTy);
+
+ E->VectorizedValue = V;
+ ++NumVectorInstructions;
+
+ return V;
+ }
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
@@ -13982,6 +14096,17 @@ Value *BoUpSLP::vectorizeTree(
if (GEP->hasName())
CloneGEP->takeName(GEP);
Ex = CloneGEP;
+ } else if (auto *VecTy =
+ dyn_cast<FixedVectorType>(Scalar->getType())) {
+ assert(SLPReVec && "FixedVectorType is not expected.");
+ unsigned VecTyNumElements = VecTy->getNumElements();
+ // When REVEC is enabled, we need to extract a vector.
+ // Note: The element size of Scalar may be different from the
+ // element size of Vec.
+ Ex = Builder.CreateExtractVector(
+ FixedVectorType::get(Vec->getType()->getScalarType(),
+ VecTyNumElements),
+ Vec, Builder.getInt64(ExternalUse.Lane * VecTyNumElements));
} else {
Ex = Builder.CreateExtractElement(Vec, Lane);
}
@@ -14039,12 +14164,19 @@ Value *BoUpSLP::vectorizeTree(
"ExternallyUsedValues map or remain as scalar in vectorized "
"instructions");
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
- if (auto *PHI = dyn_cast<PHINode>(VecI))
- Builder.SetInsertPoint(PHI->getParent(),
- PHI->getParent()->getFirstNonPHIIt());
- else
+ if (auto *PHI = dyn_cast<PHINode>(VecI)) {
+ if (PHI->getParent()->isLandingPad())
+ Builder.SetInsertPoint(
+ PHI->getParent(),
+ std::next(
+ PHI->getParent()->getLandingPadInst()->getIterator()));
+ else
+ Builder.SetInsertPoint(PHI->getParent(),
+ PHI->getParent()->getFirstNonPHIIt());
+ } else {
Builder.SetInsertPoint(VecI->getParent(),
std::next(VecI->getIterator()));
+ }
} else {
Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
}
@@ -14070,11 +14202,18 @@ Value *BoUpSLP::vectorizeTree(
auto VecIt = VectorCasts.find(Key);
if (VecIt == VectorCasts.end()) {
IRBuilderBase::InsertPointGuard Guard(Builder);
- if (auto *IVec = dyn_cast<PHINode>(Vec))
- Builder.SetInsertPoint(
- IVec->getParent()->getFirstNonPHIOrDbgOrLifetime());
- else if (auto *IVec = dyn_cast<Instruction>(Vec))
+ if (auto *IVec = dyn_cast<PHINode>(Vec)) {
+ if (IVec->getParent()->isLandingPad())
+ Builder.SetInsertPoint(IVec->getParent(),
+ std::next(IVec->getParent()
+ ->getLandingPadInst()
+ ->getIterator()));
+ else
+ Builder.SetInsertPoint(
+ IVec->getParent()->getFirstNonPHIOrDbgOrLifetime());
+ } else if (auto *IVec = dyn_cast<Instruction>(Vec)) {
Builder.SetInsertPoint(IVec->getNextNonDebugInstruction());
+ }
Vec = Builder.CreateIntCast(
Vec,
getWidenedType(
@@ -17312,27 +17451,23 @@ public:
// Try to handle shuffled extractelements.
if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
I + 1 < E) {
- InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI);
- if (NextS.getOpcode() == Instruction::ExtractElement &&
- !NextS.isAltShuffle()) {
- SmallVector<Value *> CommonCandidates(Candidates);
- for (Value *RV : ReducedVals[I + 1]) {
- Value *RdxVal = TrackedVals.find(RV)->second;
- // Check if the reduction value was not overriden by the
- // extractelement instruction because of the vectorization and
- // exclude it, if it is not compatible with other values.
- if (auto *Inst = dyn_cast<Instruction>(RdxVal))
- if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
- continue;
- CommonCandidates.push_back(RdxVal);
- TrackedToOrig.try_emplace(RdxVal, RV);
- }
- SmallVector<int> Mask;
- if (isFixedVectorShuffle(CommonCandidates, Mask)) {
- ++I;
- Candidates.swap(CommonCandidates);
- ShuffledExtracts = true;
- }
+ SmallVector<Value *> CommonCandidates(Candidates);
+ for (Value *RV : ReducedVals[I + 1]) {
+ Value *RdxVal = TrackedVals.find(RV)->second;
+ // Check if the reduction value was not overriden by the
+ // extractelement instruction because of the vectorization and
+ // exclude it, if it is not compatible with other values.
+ auto *Inst = dyn_cast<ExtractElementInst>(RdxVal);
+ if (!Inst)
+ continue;
+ CommonCandidates.push_back(RdxVal);
+ TrackedToOrig.try_emplace(RdxVal, RV);
+ }
+ SmallVector<int> Mask;
+ if (isFixedVectorShuffle(CommonCandidates, Mask)) {
+ ++I;
+ Candidates.swap(CommonCandidates);
+ ShuffledExtracts = true;
}
}
@@ -18536,6 +18671,12 @@ static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI,
if (CI1->getOperand(0)->getType()->getTypeID() >
CI2->getOperand(0)->getType()->getTypeID())
return false;
+ if (CI1->getOperand(0)->getType()->getScalarSizeInBits() <
+ CI2->getOperand(0)->getType()->getScalarSizeInBits())
+ return !IsCompatibility;
+ if (CI1->getOperand(0)->getType()->getScalarSizeInBits() >
+ CI2->getOperand(0)->getType()->getScalarSizeInBits())
+ return false;
CmpInst::Predicate Pred1 = CI1->getPredicate();
CmpInst::Predicate Pred2 = CI2->getPredicate();
CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
@@ -18703,6 +18844,12 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
return true;
if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
return false;
+ if (V1->getType()->getScalarSizeInBits() <
+ V2->getType()->getScalarSizeInBits())
+ return true;
+ if (V1->getType()->getScalarSizeInBits() >
+ V2->getType()->getScalarSizeInBits())
+ return false;
ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
if (Opcodes1.size() < Opcodes2.size())
@@ -19123,6 +19270,12 @@ bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
if (V->getPointerOperandType()->getTypeID() >
V2->getPointerOperandType()->getTypeID())
return false;
+ if (V->getValueOperand()->getType()->getScalarSizeInBits() <
+ V2->getValueOperand()->getType()->getScalarSizeInBits())
+ return true;
+ if (V->getValueOperand()->getType()->getScalarSizeInBits() >
+ V2->getValueOperand()->getType()->getScalarSizeInBits())
+ return false;
// UndefValues are compatible with all other values.
if (isa<UndefValue>(V->getValueOperand()) ||
isa<UndefValue>(V2->getValueOperand()))
diff --git a/llvm/test/Analysis/BasicAA/nusw_nuw_nonneg.ll b/llvm/test/Analysis/BasicAA/nusw_nuw_nonneg.ll
new file mode 100644
index 0000000..84df629
--- /dev/null
+++ b/llvm/test/Analysis/BasicAA/nusw_nuw_nonneg.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -passes=aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+
+; CHECK-LABEL: test
+; CHECK: NoAlias: i8* %p.minus.2, i8* %p.plus.2
+; CHECK: MayAlias: i8* %p.idx.maybeneg, i8* %p.minus.2
+; CHECK: MayAlias: i8* %p.idx.maybeneg, i8* %p.plus.2
+; CHECK: NoAlias: i8* %p.idx.nneg, i8* %p.minus.2
+; CHECK: MayAlias: i8* %p.idx.nneg, i8* %p.plus.2
+; CHECK: MustAlias: i8* %p.idx.maybeneg, i8* %p.idx.nneg
+define void @test(ptr %p, i64 %idx) {
+ %p.minus.2 = getelementptr i8, ptr %p, i64 -2
+ %p.plus.2 = getelementptr i8, ptr %p, i64 2
+ %p.idx.maybeneg = getelementptr inbounds i8, ptr %p, i64 %idx
+ %p.idx.nneg = getelementptr nuw nusw i8, ptr %p, i64 %idx
+ load i8, ptr %p.minus.2
+ load i8, ptr %p.plus.2
+ load i8, ptr %p.idx.maybeneg
+ load i8, ptr %p.idx.nneg
+ ret void
+}
diff --git a/llvm/test/Analysis/BasicAA/struct-geps.ll b/llvm/test/Analysis/BasicAA/struct-geps.ll
index c7ca731..008d73e 100644
--- a/llvm/test/Analysis/BasicAA/struct-geps.ll
+++ b/llvm/test/Analysis/BasicAA/struct-geps.ll
@@ -51,6 +51,17 @@ define void @test_not_inbounds(ptr %st, i64 %i, i64 %j, i64 %k) {
ret void
}
+; It is sufficient to have nusw instead of inbounds.
+; CHECK-LABEL: test_nusw
+; CHECK: NoAlias: i32* %x, i32* %y
+define void @test_nusw(ptr %st, i64 %i, i64 %j, i64 %k) {
+ %x = getelementptr nusw %struct, ptr %st, i64 %i, i32 0
+ %y = getelementptr nusw %struct, ptr %st, i64 %j, i32 1
+ load i32, ptr %x
+ load i32, ptr %y
+ ret void
+}
+
; CHECK-LABEL: test_in_array
; CHECK-DAG: MayAlias: [1 x %struct]* %st, i32* %x
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
index 1993023..004b696 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
@@ -691,8 +691,8 @@ define void @get_lane_mask() #0 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
@@ -701,8 +701,8 @@ define void @get_lane_mask() #0 {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; TYPE_BASED_ONLY-LABEL: 'get_lane_mask'
diff --git a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
index 40091c3..384ebbe 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
@@ -62,27 +62,27 @@ define i32 @add(i32 %arg) {
;
; NEON-RECIP-LABEL: 'add'
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 117 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; MVE-RECIP-LABEL: 'add'
@@ -137,27 +137,27 @@ define i32 @add(i32 %arg) {
;
; NEON-SIZE-LABEL: 'add'
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 43 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 105 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
; MVE-SIZE-LABEL: 'add'
@@ -268,27 +268,27 @@ define i32 @sub(i32 %arg) {
;
; NEON-RECIP-LABEL: 'sub'
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 117 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; MVE-RECIP-LABEL: 'sub'
@@ -343,27 +343,27 @@ define i32 @sub(i32 %arg) {
;
; NEON-SIZE-LABEL: 'sub'
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 43 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 105 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
; MVE-SIZE-LABEL: 'sub'
diff --git a/llvm/test/Analysis/CostModel/ARM/arith-usat.ll b/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
index 679ac94..64d49f7 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
@@ -62,27 +62,27 @@ define i32 @add(i32 %arg) {
;
; NEON-RECIP-LABEL: 'add'
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 58 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; MVE-RECIP-LABEL: 'add'
@@ -137,27 +137,27 @@ define i32 @add(i32 %arg) {
;
; NEON-SIZE-LABEL: 'add'
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
; MVE-SIZE-LABEL: 'add'
@@ -268,27 +268,27 @@ define i32 @sub(i32 %arg) {
;
; NEON-RECIP-LABEL: 'sub'
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 58 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; MVE-RECIP-LABEL: 'sub'
@@ -343,27 +343,27 @@ define i32 @sub(i32 %arg) {
;
; NEON-SIZE-LABEL: 'sub'
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
;
; MVE-SIZE-LABEL: 'sub'
diff --git a/llvm/test/Analysis/CtxProfAnalysis/load.ll b/llvm/test/Analysis/CtxProfAnalysis/load.ll
new file mode 100644
index 0000000..9cd78cf
--- /dev/null
+++ b/llvm/test/Analysis/CtxProfAnalysis/load.ll
@@ -0,0 +1,58 @@
+; REQUIRES: x86_64-linux
+
+; RUN: split-file %s %t
+; RUN: llvm-ctxprof-util fromJSON --input=%t/profile.json --output=%t/profile.ctxprofdata
+; RUN: not opt -passes='require<ctx-prof-analysis>,print<ctx-prof-analysis>' \
+; RUN: %t/empty.ll -S 2>&1 | FileCheck %s --check-prefix=NO-FILE
+
+; RUN: not opt -passes='require<ctx-prof-analysis>,print<ctx-prof-analysis>' \
+; RUN: -use-ctx-profile=does_not_exist.ctxprofdata %t/empty.ll -S 2>&1 | FileCheck %s --check-prefix=NO-FILE
+
+; RUN: opt -passes='require<ctx-prof-analysis>,print<ctx-prof-analysis>' \
+; RUN: -use-ctx-profile=%t/profile.ctxprofdata %t/empty.ll -S 2> %t/output.json
+; RUN: diff %t/profile.json %t/output.json
+
+; NO-FILE: error: could not open contextual profile file
+;
+; This is the reference profile, laid out in the format the json formatter will
+; output it from opt.
+;--- profile.json
+[
+ {
+ "Callsites": [
+ [],
+ [
+ {
+ "Counters": [
+ 4,
+ 5
+ ],
+ "Guid": 2000
+ },
+ {
+ "Counters": [
+ 6,
+ 7,
+ 8
+ ],
+ "Guid": 18446744073709551613
+ }
+ ]
+ ],
+ "Counters": [
+ 1,
+ 2,
+ 3
+ ],
+ "Guid": 1000
+ },
+ {
+ "Counters": [
+ 5,
+ 9,
+ 10
+ ],
+ "Guid": 18446744073709551612
+ }
+]
+;--- empty.ll
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/always_uniform.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/always_uniform.ll
index 80ffc31..99d9e28 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/always_uniform.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/always_uniform.ll
@@ -103,6 +103,39 @@ define void @workgroup_id_z(ptr addrspace(1) inreg %out) {
ret void
}
+; CHECK-LABEL: for function 's_getpc':
+; CHECK: ALL VALUES UNIFORM
+define void @s_getpc(ptr addrspace(1) inreg %out) {
+ %result = call i64 @llvm.amdgcn.s.getpc()
+ store i64 %result, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+; CHECK-LABEL: for function 's_getreg':
+; CHECK: ALL VALUES UNIFORM
+define void @s_getreg(ptr addrspace(1) inreg %out) {
+ %result = call i32 @llvm.amdgcn.s.getreg(i32 123)
+ store i32 %result, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+; CHECK-LABEL: for function 's_memtime':
+; CHECK: ALL VALUES UNIFORM
+define void @s_memtime(ptr addrspace(1) inreg %out) {
+ %result = call i64 @llvm.amdgcn.s.memtime()
+ store i64 %result, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+; CHECK-LABEL: for function 's_memrealtime':
+; CHECK: ALL VALUES UNIFORM
+define void @s_memrealtime(ptr addrspace(1) inreg %out) {
+ %result = call i64 @llvm.amdgcn.s.memrealtime()
+ store i64 %result, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+
declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #0
declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #1
diff --git a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-exchange-fence.ll b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-exchange-fence.ll
new file mode 100644
index 0000000..2adbc70
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-exchange-fence.ll
@@ -0,0 +1,64 @@
+; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+lse -O0 | FileCheck %s
+; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+lse -O1 | FileCheck %s
+
+; When their destination register is WZR/ZZR, SWP operations are not regarded as
+; a read for the purpose of a DMB.LD in the AArch64 memory model.
+; This test ensures that the AArch64DeadRegisterDefinitions pass does not
+; replace the destination register of SWP instructions with the zero register
+; when the read value is unused.
+
+define dso_local i32 @atomic_exchange_monotonic(ptr %ptr, ptr %ptr2, i32 %value) {
+; CHECK-LABEL: atomic_exchange_monotonic:
+; CHECK: // %bb.0:
+; CHECK-NEXT: swp
+; CHECK-NOT: wzr
+; CHECK-NEXT: dmb ishld
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+ %r0 = atomicrmw xchg ptr %ptr, i32 %value monotonic
+ fence acquire
+ %r1 = load atomic i32, ptr %ptr2 monotonic, align 4
+ ret i32 %r1
+}
+
+define dso_local i32 @atomic_exchange_acquire(ptr %ptr, ptr %ptr2, i32 %value) {
+; CHECK-LABEL: atomic_exchange_acquire:
+; CHECK: // %bb.0:
+; CHECK-NEXT: swpa
+; CHECK-NOT: wzr
+; CHECK-NEXT: dmb ishld
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+ %r0 = atomicrmw xchg ptr %ptr, i32 %value acquire
+ fence acquire
+ %r1 = load atomic i32, ptr %ptr2 monotonic, align 4
+ ret i32 %r1
+}
+
+define dso_local i32 @atomic_exchange_release(ptr %ptr, ptr %ptr2, i32 %value) {
+; CHECK-LABEL: atomic_exchange_release:
+; CHECK: // %bb.0:
+; CHECK-NEXT: swpl
+; CHECK-NOT: wzr
+; CHECK-NEXT: dmb ishld
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+ %r0 = atomicrmw xchg ptr %ptr, i32 %value release
+ fence acquire
+ %r1 = load atomic i32, ptr %ptr2 monotonic, align 4
+ ret i32 %r1
+}
+
+define dso_local i32 @atomic_exchange_acquire_release(ptr %ptr, ptr %ptr2, i32 %value) {
+; CHECK-LABEL: atomic_exchange_acquire_release:
+; CHECK: // %bb.0:
+; CHECK-NEXT: swpal
+; CHECK-NOT: wzr
+; CHECK-NEXT: dmb ishld
+; CHECK-NEXT: ldr w0, [x1]
+; CHECK-NEXT: ret
+ %r0 = atomicrmw xchg ptr %ptr, i32 %value acq_rel
+ fence acquire
+ %r1 = load atomic i32, ptr %ptr2 monotonic, align 4
+ ret i32 %r1
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
index 9d12c3c..7109482 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
@@ -367,3 +367,32 @@ body: |
%shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %buildvec(<4 x s32>), %undef, shufflemask(0, 0, 0, 0)
$q0 = COPY %shuf(<4 x s32>)
RET_ReallyLR implicit $q0
+
+...
+---
+name: build_vector_rhs
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $w0, $w1, $w2, $w3, $w4
+ ; The G_SHUFFLE_VECTOR is fed by a G_BUILD_VECTOR, and the 0th input
+ ; operand is not a constant. We should get a G_DUP.
+ ;
+ ; CHECK-LABEL: name: build_vector
+ ; CHECK: liveins: $w0, $w1, $w2, $w3, $w4
+ ; CHECK: %lane_1:_(s32) = COPY $w1
+ ; CHECK: %shuf:_(<4 x s32>) = G_DUP %lane_1(s32)
+ ; CHECK: $q0 = COPY %shuf(<4 x s32>)
+ ; CHECK: RET_ReallyLR implicit $q0
+ %lane_0:_(s32) = COPY $w0
+ %lane_1:_(s32) = COPY $w1
+ %b:_(s32) = COPY $w2
+ %c:_(s32) = COPY $w3
+ %d:_(s32) = COPY $w4
+ %buildvec0:_(<4 x s32>) = G_BUILD_VECTOR %lane_0(s32), %b(s32), %c(s32), %d(s32)
+ %buildvec1:_(<4 x s32>) = G_BUILD_VECTOR %lane_1(s32), %b(s32), %c(s32), %d(s32)
+ %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %buildvec0(<4 x s32>), %buildvec1, shufflemask(4, 4, 4, 4)
+ $q0 = COPY %shuf(<4 x s32>)
+ RET_ReallyLR implicit $q0
diff --git a/llvm/test/CodeGen/AArch64/O0-pipeline.ll b/llvm/test/CodeGen/AArch64/O0-pipeline.ll
index bfcb9a7..ba61149 100644
--- a/llvm/test/CodeGen/AArch64/O0-pipeline.ll
+++ b/llvm/test/CodeGen/AArch64/O0-pipeline.ll
@@ -22,7 +22,6 @@
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/AArch64/O3-pipeline.ll b/llvm/test/CodeGen/AArch64/O3-pipeline.ll
index 017349a..845634e 100644
--- a/llvm/test/CodeGen/AArch64/O3-pipeline.ll
+++ b/llvm/test/CodeGen/AArch64/O3-pipeline.ll
@@ -60,7 +60,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index d4ad33f..215907c 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -571,6 +571,28 @@ define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
ret i32 %abs
}
+define i64 @vector_legalized(i16 %a, i16 %b) {
+; CHECK-LABEL: vector_legalized:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: sub w8, w8, w1, sxth
+; CHECK-NEXT: addp d0, v0.2d
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cneg w8, w8, mi
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: add x0, x9, x8
+; CHECK-NEXT: ret
+ %ea = sext i16 %a to i32
+ %eb = sext i16 %b to i32
+ %s = sub i32 %ea, %eb
+ %ab = call i32 @llvm.abs.i32(i32 %s, i1 false)
+ %e = zext i32 %ab to i64
+ %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> zeroinitializer)
+ %z = add i64 %red, %e
+ ret i64 %z
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 983db62..f70f095 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -409,6 +409,32 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
ret i128 %sel
}
+;
+; negative tests
+;
+
+define i64 @vector_legalized(i16 %a, i16 %b) {
+; CHECK-LABEL: vector_legalized:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: and w8, w0, #0xffff
+; CHECK-NEXT: sub w8, w8, w1, uxth
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: addp d0, v0.2d
+; CHECK-NEXT: cneg w8, w8, mi
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: add x0, x9, x8
+; CHECK-NEXT: ret
+ %ea = zext i16 %a to i32
+ %eb = zext i16 %b to i32
+ %s = sub i32 %ea, %eb
+ %ab = call i32 @llvm.abs.i32(i32 %s, i1 false)
+ %e = zext i32 %ab to i64
+ %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> zeroinitializer)
+ %z = add i64 %red, %e
+ ret i64 %z
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
diff --git a/llvm/test/CodeGen/AArch64/emutls_alias.ll b/llvm/test/CodeGen/AArch64/emutls_alias.ll
new file mode 100644
index 0000000..4a157d8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/emutls_alias.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -emulated-tls -mtriple=aarch64-linux-ohos \
+; RUN: | FileCheck -check-prefix=EMUTLS_CHECK %s
+
+%struct.__res_state = type { [5 x i8] }
+
+@foo = dso_local thread_local global %struct.__res_state { [5 x i8] c"\01\02\03\04\05" }, align 1
+
+@bar = hidden thread_local(initialexec) alias %struct.__res_state, ptr @foo
+
+define dso_local i32 @main() {
+ %1 = alloca i32, align 4
+ store i32 0, ptr %1, align 4
+ store i8 0, ptr @bar, align 1
+ ; EMUTLS_CHECK: adrp x0, __emutls_v.foo
+ ; EMUTLS_CHECK-NEXT: add x0, x0, :lo12:__emutls_v.foo
+ ret i32 0
+}
diff --git a/llvm/test/CodeGen/AArch64/note-gnu-property-elf-pauthabi.ll b/llvm/test/CodeGen/AArch64/note-gnu-property-elf-pauthabi.ll
index 728cffe..b2ebf1f 100644
--- a/llvm/test/CodeGen/AArch64/note-gnu-property-elf-pauthabi.ll
+++ b/llvm/test/CodeGen/AArch64/note-gnu-property-elf-pauthabi.ll
@@ -10,7 +10,7 @@
!llvm.module.flags = !{!0, !1}
!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
-!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 85}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 341}
; ASM: .section .note.gnu.property,"a",@note
; ASM-NEXT: .p2align 3, 0x0
@@ -22,12 +22,12 @@
; ASM-NEXT: .word 3221225473
; ASM-NEXT: .word 16
; ASM-NEXT: .xword 268435458
-; ASM-NEXT: .xword 85
+; ASM-NEXT: .xword 341
; OBJ: Displaying notes found in: .note.gnu.property
; OBJ-NEXT: Owner Data size Description
; OBJ-NEXT: GNU 0x00000018 NT_GNU_PROPERTY_TYPE_0 (property note)
-; OBJ-NEXT: AArch64 PAuth ABI core info: platform 0x10000002 (llvm_linux), version 0x55 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini)
+; OBJ-NEXT: AArch64 PAuth ABI core info: platform 0x10000002 (llvm_linux), version 0x155 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini, !PointerAuthInitFiniAddressDiscrimination, PointerAuthELFGOT)
; ERR: either both or no 'aarch64-elf-pauthabi-platform' and 'aarch64-elf-pauthabi-version' module flags must be present
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-basic-pic.ll b/llvm/test/CodeGen/AArch64/ptrauth-basic-pic.ll
new file mode 100644
index 0000000..de6901f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-basic-pic.ll
@@ -0,0 +1,82 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -global-isel=0 -fast-isel=0 -verify-machineinstrs \
+; RUN: -relocation-model=pic -mattr=+pauth %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -global-isel=0 -fast-isel=1 -verify-machineinstrs \
+; RUN: -relocation-model=pic -mattr=+pauth %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -global-isel=1 -global-isel-abort=1 -verify-machineinstrs \
+; RUN: -relocation-model=pic -mattr=+pauth %s -o - | FileCheck %s
+
+;; Note: for FastISel, we fall back to SelectionDAG
+
+@var = global i32 0
+
+define i32 @get_globalvar() {
+; CHECK-LABEL: get_globalvar:
+; CHECK: adrp x[[GOT:[0-9]+]], :got_auth:var
+; CHECK-NEXT: add x[[GOT]], x[[GOT]], :got_auth_lo12:var
+; CHECK-NEXT: ldr x[[SYM:[0-9]+]], [x[[GOT]]]
+; CHECK-NEXT: autda x[[SYM]], x[[GOT]]
+; CHECK-NEXT: ldr w0, [x[[SYM]]]
+
+ %val = load i32, ptr @var
+ ret i32 %val
+}
+
+define ptr @get_globalvaraddr() {
+; CHECK-LABEL: get_globalvaraddr:
+; CHECK: adrp x[[GOT:[0-9]+]], :got_auth:var
+; CHECK-NEXT: add x[[GOT]], x[[GOT]], :got_auth_lo12:var
+; CHECK-NEXT: ldr x0, [x[[GOT]]]
+; CHECK-NEXT: autda x0, x[[GOT]]
+
+ %val = load i32, ptr @var
+ ret ptr @var
+}
+
+declare i32 @foo()
+
+define ptr @resign_globalfunc() {
+; CHECK-LABEL: resign_globalfunc:
+; CHECK: adrp x17, :got_auth:foo
+; CHECK-NEXT: add x17, x17, :got_auth_lo12:foo
+; CHECK-NEXT: ldr x16, [x17]
+; CHECK-NEXT: autia x16, x17
+; CHECK-NEXT: mov x17, #42
+; CHECK-NEXT: pacia x16, x17
+; CHECK-NEXT: mov x0, x16
+; CHECK-NEXT: ret
+
+ ret ptr ptrauth (ptr @foo, i32 0, i64 42)
+}
+
+define ptr @resign_globalvar() {
+; CHECK-LABEL: resign_globalvar:
+; CHECK: adrp x17, :got_auth:var
+; CHECK-NEXT: add x17, x17, :got_auth_lo12:var
+; CHECK-NEXT: ldr x16, [x17]
+; CHECK-NEXT: autda x16, x17
+; CHECK-NEXT: mov x17, #43
+; CHECK-NEXT: pacdb x16, x17
+; CHECK-NEXT: mov x0, x16
+; CHECK-NEXT: ret
+
+ ret ptr ptrauth (ptr @var, i32 3, i64 43)
+}
+
+define ptr @resign_globalvar_offset() {
+; CHECK-LABEL: resign_globalvar_offset:
+; CHECK: adrp x17, :got_auth:var
+; CHECK-NEXT: add x17, x17, :got_auth_lo12:var
+; CHECK-NEXT: ldr x16, [x17]
+; CHECK-NEXT: autda x16, x17
+; CHECK-NEXT: add x16, x16, #16
+; CHECK-NEXT: mov x17, #44
+; CHECK-NEXT: pacda x16, x17
+; CHECK-NEXT: mov x0, x16
+; CHECK-NEXT: ret
+
+ ret ptr ptrauth (ptr getelementptr (i8, ptr @var, i64 16), i32 2, i64 44)
+}
+
+!llvm.module.flags = !{!0, !1}
+!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-elf-globals-pic.ll b/llvm/test/CodeGen/AArch64/ptrauth-elf-globals-pic.ll
new file mode 100644
index 0000000..2b7d863
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-elf-globals-pic.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mtriple=arm64 -global-isel=0 -fast-isel=0 -relocation-model=pic -o - %s -mcpu=cyclone -mattr=+pauth | FileCheck %s
+; RUN: llc -mtriple=arm64 -global-isel=0 -fast-isel=1 -relocation-model=pic -o - %s -mcpu=cyclone -mattr=+pauth | FileCheck %s
+; RUN: llc -mtriple=arm64 -global-isel=1 -global-isel-abort=1 -relocation-model=pic -o - %s -mcpu=cyclone -mattr=+pauth | FileCheck %s
+
+;; Note: for FastISel, we fall back to SelectionDAG
+
+@var8 = external global i8, align 1
+
+define i8 @test_i8(i8 %new) {
+ %val = load i8, ptr @var8, align 1
+ store i8 %new, ptr @var8
+ ret i8 %val
+
+; CHECK: adrp x[[HIREG:[0-9]+]], :got_auth:var8
+; CHECK-NEXT: add x[[HIREG]], x[[HIREG]], :got_auth_lo12:var8
+; CHECK-NEXT: ldr x[[VAR_ADDR:[0-9]+]], [x[[HIREG]]]
+; CHECK-NEXT: autda x[[VAR_ADDR]], x[[HIREG]]
+; CHECK-NEXT: ldrb {{w[0-9]+}}, [x[[VAR_ADDR]]]
+}
+
+!llvm.module.flags = !{!0, !1}
+!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-extern-weak.ll b/llvm/test/CodeGen/AArch64/ptrauth-extern-weak.ll
new file mode 100644
index 0000000..88b6111
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-extern-weak.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel=0 -fast-isel=0 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel=0 -fast-isel=1 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel=1 -global-isel-abort=1 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+
+;; Note: for FastISel, we fall back to SelectionDAG
+
+declare extern_weak dso_local i32 @var()
+
+define ptr @foo() {
+; The usual ADRP/ADD pair can't be used for a weak reference because it must
+; evaluate to 0 if the symbol is undefined. We use a GOT entry for PIC
+; otherwise a litpool entry.
+ ret ptr @var
+
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got_auth:var
+; CHECK-NEXT: add x[[ADDRHI]], x[[ADDRHI]], :got_auth_lo12:var
+; CHECK-NEXT: ldr x0, [x[[ADDRHI]]]
+; CHECK-NEXT: autia x0, x[[ADDRHI]]
+}
+
+@arr_var = extern_weak global [10 x i32]
+
+define ptr @bar() {
+ %addr = getelementptr [10 x i32], ptr @arr_var, i32 0, i32 5
+
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got_auth:arr_var
+; CHECK-NEXT: add x[[ADDRHI]], x[[ADDRHI]], :got_auth_lo12:arr_var
+; CHECK-NEXT: ldr [[BASE:x[0-9]+]], [x[[ADDRHI]]]
+; CHECK-NEXT: autda [[BASE]], x[[ADDRHI]]
+; CHECK-NEXT: add x0, [[BASE]], #20
+ ret ptr %addr
+}
+
+!llvm.module.flags = !{!0, !1}
+!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-got-abuse.ll b/llvm/test/CodeGen/AArch64/ptrauth-got-abuse.ll
new file mode 100644
index 0000000..c158053
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-got-abuse.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=0 -fast-isel=0 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=0 -fast-isel=1 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=1 -global-isel-abort=1 -relocation-model=pic -mattr=+pauth -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=0 -fast-isel=0 -relocation-model=pic -filetype=obj -mattr=+pauth -o /dev/null %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=0 -fast-isel=1 -relocation-model=pic -filetype=obj -mattr=+pauth -o /dev/null %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=false -global-isel=1 -global-isel-abort=1 -relocation-model=pic -filetype=obj -mattr=+pauth -o /dev/null %s
+
+;; Note: for FastISel, we fall back to SelectionDAG
+
+declare void @consume(i32)
+declare void @func()
+
+define void @aliasee_func() {
+ ret void
+}
+@alias_func = alias void (), ptr @aliasee_func
+
+@aliasee_global = global i32 42
+@alias_global = alias i32, ptr @aliasee_global
+
+define void @foo() nounwind {
+; CHECK-LABEL: foo:
+entry:
+ call void @consume(i32 ptrtoint (ptr @func to i32))
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got_auth:func
+; CHECK-NEXT: add x[[ADDRHI]], x[[ADDRHI]], :got_auth_lo12:func
+; CHECK-NEXT: ldr x[[SYM:[0-9]+]], [x[[ADDRHI]]]
+; CHECK-NEXT: autia x[[SYM:[0-9]+]], x[[ADDRHI]]
+ call void @consume(i32 ptrtoint (ptr @alias_func to i32))
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got_auth:alias_func
+; CHECK-NEXT: add x[[ADDRHI]], x[[ADDRHI]], :got_auth_lo12:alias_func
+; CHECK-NEXT: ldr x[[SYM:[0-9]+]], [x[[ADDRHI]]]
+; CHECK-NEXT: autia x[[SYM:[0-9]+]], x[[ADDRHI]]
+ call void @consume(i32 ptrtoint (ptr @alias_global to i32))
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got_auth:alias_global
+; CHECK-NEXT: add x[[ADDRHI]], x[[ADDRHI]], :got_auth_lo12:alias_global
+; CHECK-NEXT: ldr x[[SYM:[0-9]+]], [x[[ADDRHI]]]
+; CHECK-NEXT: autda x[[SYM:[0-9]+]], x[[ADDRHI]]
+ ret void
+}
+
+!llvm.module.flags = !{!0, !1}
+!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-init-fini.ll b/llvm/test/CodeGen/AArch64/ptrauth-init-fini.ll
new file mode 100644
index 0000000..186a31c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-init-fini.ll
@@ -0,0 +1,104 @@
+; RUN: rm -rf %t && split-file %s %t && cd %t
+
+;--- nodisc.ll
+
+; RUN: llc -mtriple aarch64-elf -mattr=+pauth -filetype=asm -o - nodisc.ll | \
+; RUN: FileCheck %s --check-prefix=ASM
+; RUN: llc -mtriple aarch64-elf -mattr=+pauth -filetype=obj -o - nodisc.ll | \
+; RUN: llvm-readelf -r -x .init_array -x .fini_array - | FileCheck %s --check-prefix=OBJ
+
+; ASM: .section .init_array,"aw",@init_array
+; ASM-NEXT: .p2align 3, 0x0
+; ASM-NEXT: .xword foo@AUTH(ia,55764)
+; ASM-NEXT: .section .fini_array,"aw",@fini_array
+; ASM-NEXT: .p2align 3, 0x0
+; ASM-NEXT: .xword bar@AUTH(ia,55764)
+
+; OBJ: Relocation section '.rela.init_array' at offset 0x[[#]] contains 1 entries:
+; OBJ-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+; OBJ-NEXT: 0000000000000000 0000000700000244 R_AARCH64_AUTH_ABS64 0000000000000000 foo + 0
+; OBJ: Relocation section '.rela.fini_array' at offset 0x[[#]] contains 1 entries:
+; OBJ-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+; OBJ-NEXT: 0000000000000000 0000000800000244 R_AARCH64_AUTH_ABS64 0000000000000004 bar + 0
+; OBJ: Hex dump of section '.init_array':
+; OBJ-NEXT: 0x00000000 00000000 d4d90000
+; OBJ: Hex dump of section '.fini_array':
+; OBJ-NEXT: 0x00000000 00000000 d4d90000
+;; ^^^^ 0xD9D4: constant discriminator = 55764
+;; ^^ 0x80: bits 61..60 key = IA; bit 63 addr disc = false
+
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @foo, i32 0, i64 55764), ptr null }]
+@llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @bar, i32 0, i64 55764), ptr null }]
+
+define void @foo() {
+ ret void
+}
+
+define void @bar() {
+ ret void
+}
+
+;--- disc.ll
+
+; RUN: llc -mtriple aarch64-elf -mattr=+pauth -filetype=asm -o - disc.ll | \
+; RUN: FileCheck %s --check-prefix=ASM-DISC
+; RUN: llc -mtriple aarch64-elf -mattr=+pauth -filetype=obj -o - disc.ll | \
+; RUN: llvm-readelf -r -x .init_array -x .fini_array - | FileCheck %s --check-prefix=OBJ-DISC
+
+; ASM-DISC: .section .init_array,"aw",@init_array
+; ASM-DISC-NEXT: .p2align 3, 0x0
+; ASM-DISC-NEXT: .xword foo@AUTH(ia,55764,addr)
+; ASM-DISC-NEXT: .section .fini_array,"aw",@fini_array
+; ASM-DISC-NEXT: .p2align 3, 0x0
+; ASM-DISC-NEXT: .xword bar@AUTH(ia,55764,addr)
+
+; OBJ-DISC: Relocation section '.rela.init_array' at offset 0x[[#]] contains 1 entries:
+; OBJ-DISC-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+; OBJ-DISC-NEXT: 0000000000000000 0000000700000244 R_AARCH64_AUTH_ABS64 0000000000000000 foo + 0
+; OBJ-DISC: Relocation section '.rela.fini_array' at offset 0x[[#]] contains 1 entries:
+; OBJ-DISC-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+; OBJ-DISC-NEXT: 0000000000000000 0000000800000244 R_AARCH64_AUTH_ABS64 0000000000000004 bar + 0
+; OBJ-DISC: Hex dump of section '.init_array':
+; OBJ-DISC-NEXT: 0x00000000 00000000 d4d90080
+; OBJ-DISC: Hex dump of section '.fini_array':
+; OBJ-DISC-NEXT: 0x00000000 00000000 d4d90080
+;; ^^^^ 0xD9D4: constant discriminator = 55764
+;; ^^ 0x80: bits 61..60 key = IA; bit 63 addr disc = true
+
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @foo, i32 0, i64 55764, ptr inttoptr (i64 1 to ptr)), ptr null }]
+@llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @bar, i32 0, i64 55764, ptr inttoptr (i64 1 to ptr)), ptr null }]
+
+define void @foo() {
+ ret void
+}
+
+define void @bar() {
+ ret void
+}
+
+;--- err1.ll
+
+; RUN: not --crash llc -mtriple aarch64-elf -mattr=+pauth -filetype=asm -o - err1.ll 2>&1 | \
+; RUN: FileCheck %s --check-prefix=ERR1
+
+; ERR1: LLVM ERROR: unexpected address discrimination value for ctors/dtors entry, only 'ptr inttoptr (i64 1 to ptr)' is allowed
+
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @foo, i32 0, i64 55764, ptr inttoptr (i64 2 to ptr)), ptr null }]
+
+define void @foo() {
+ ret void
+}
+
+;--- err2.ll
+
+; RUN: not --crash llc -mtriple aarch64-elf -mattr=+pauth -filetype=asm -o - err2.ll 2>&1 | \
+; RUN: FileCheck %s --check-prefix=ERR2
+
+; ERR2: LLVM ERROR: unexpected address discrimination value for ctors/dtors entry, only 'ptr inttoptr (i64 1 to ptr)' is allowed
+
+@g = external global ptr
+@llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr ptrauth (ptr @bar, i32 0, i64 55764, ptr @g), ptr null }]
+
+define void @bar() {
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-tagged-globals-pic.ll b/llvm/test/CodeGen/AArch64/ptrauth-tagged-globals-pic.ll
new file mode 100644
index 0000000..c9a6722
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-tagged-globals-pic.ll
@@ -0,0 +1,66 @@
+; RUN: llc --relocation-model=pic -mattr=+pauth < %s | FileCheck %s --check-prefixes=CHECK,GISEL
+
+; RUN: llc -global-isel=0 -fast-isel=0 -O0 --relocation-model=pic < %s -mattr=+pauth | FileCheck %s --check-prefixes=CHECK,DAGISEL
+; RUN: llc -global-isel=0 -fast-isel=1 -O0 --relocation-model=pic < %s -mattr=+pauth | FileCheck %s --check-prefixes=CHECK,DAGISEL
+; RUN: llc -global-isel=1 -global-isel-abort=1 -O0 --relocation-model=pic < %s -mattr=+pauth | FileCheck %s --check-prefixes=CHECK,GISEL
+
+;; Note: for FastISel, we fall back to SelectionDAG
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-android"
+
+@global = external global i32
+declare void @func()
+
+define ptr @global_addr() #0 {
+ ; CHECK-LABEL: global_addr:
+ ; CHECK: adrp [[REG:x[0-9]+]], :got_auth:global
+ ; CHECK-NEXT: add [[REG]], [[REG]], :got_auth_lo12:global
+ ; CHECK-NEXT: ldr x0, [[[REG]]]
+ ; CHECK-NEXT: autda x0, [[REG]]
+ ; CHECK-NEXT: ret
+
+ ret ptr @global
+}
+
+define i32 @global_load() #0 {
+ ; CHECK-LABEL: global_load:
+ ; CHECK: adrp [[REG0:x[0-9]+]], :got_auth:global
+ ; CHECK-NEXT: add [[REG0]], [[REG0]], :got_auth_lo12:global
+ ; CHECK-NEXT: ldr [[REG1:x[0-9]+]], [[[REG0]]]
+ ; CHECK-NEXT: autda [[REG1]], [[REG0]]
+ ; CHECK-NEXT: ldr w0, [[[REG1]]]
+ ; CHECK-NEXT: ret
+ %load = load i32, ptr @global
+ ret i32 %load
+}
+
+define void @global_store() #0 {
+ ; CHECK-LABEL: global_store:
+ ; CHECK: adrp [[REG0:x[0-9]+]], :got_auth:global
+ ; CHECK-NEXT: add [[REG0]], [[REG0]], :got_auth_lo12:global
+ ; CHECK-NEXT: ldr [[REG1:x[0-9]+]], [[[REG0]]]
+ ; CHECK-NEXT: autda [[REG1]], [[REG0]]
+ ; GISEL-NEXT: str wzr, [[[REG1]]]
+ ; DAGISEL-NEXT: mov w8, wzr
+ ; DAGISEL-NEXT: str w8, [[[REG1]]]
+ ; CHECK-NEXT: ret
+ store i32 0, ptr @global
+ ret void
+}
+
+define ptr @func_addr() #0 {
+ ; CHECK-LABEL: func_addr:
+ ; CHECK: adrp [[REG:x[0-9]+]], :got_auth:func
+ ; CHECK-NEXT: add [[REG]], [[REG]], :got_auth_lo12:func
+ ; CHECK-NEXT: ldr x0, [[[REG]]]
+ ; CHECK-NEXT: autia x0, [[REG]]
+ ; CHECK-NEXT: ret
+ ret ptr @func
+}
+
+attributes #0 = { "target-features"="+tagged-globals" }
+
+!llvm.module.flags = !{!0, !1}
+!0 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 268435458}
+!1 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 256}
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
index 6e2c48f..9d865b1 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+b16b16 -force-streaming -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sve-b16b16 -force-streaming -verify-machineinstrs < %s | FileCheck %s
; SMAX (Single, x2)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
index d379845..575bcbc 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+b16b16 -force-streaming -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sve-b16b16 -force-streaming -verify-machineinstrs < %s | FileCheck %s
; SMIN (Single, x2)
diff --git a/llvm/test/CodeGen/AArch64/sms-order-physreg-deps.mir b/llvm/test/CodeGen/AArch64/sms-order-physreg-deps.mir
new file mode 100644
index 0000000..4d8067e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sms-order-physreg-deps.mir
@@ -0,0 +1,452 @@
+# RUN: llc --verify-machineinstrs -mtriple=aarch64 -o - %s -mcpu=a64fx -aarch64-enable-pipeliner -pipeliner-max-mii=100 -pipeliner-enable-copytophi=0 -debug-only=pipeliner -run-pass=pipeliner -treat-scalable-fixed-error-as-warning 2>&1 | FileCheck %s
+
+# REQUIRES: asserts
+
+# Verify that the order of the instructions is correct if they are scheduled in
+# the same cycle and they have physical register dependencies.
+
+# CHECK: Schedule Found? 1
+# CHECK: cycle {{[0-9]+}} (0) {{.*}} SUBS{{.*}} implicit-def $nzcv
+# CHECK-NOT: cycle {{[0-9]+}} (0) {{.*}} implicit-def {{.*}} $nzcv
+
+--- |
+ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+
+ declare void @free(ptr allocptr nocapture noundef) local_unnamed_addr #0
+
+ define dso_local noundef i32 @main(i32 noundef %argc, ptr nocapture noundef readnone %argv) local_unnamed_addr #1 {
+ entry:
+ %ret.i.i55 = alloca ptr, align 8
+ %ret.i.i = alloca ptr, align 8
+ %0 = load ptr, ptr %ret.i.i, align 8
+ br label %vector.ph
+
+ vector.ph: ; preds = %for.inc20.i, %entry
+ %lsr.iv1 = phi i64 [ %lsr.iv.next2, %for.inc20.i ], [ 0, %entry ]
+ %indvars.iv45.i = phi i64 [ 0, %entry ], [ %indvars.iv.next46.i, %for.inc20.i ]
+ %broadcast.splatinsert = insertelement <vscale x 4 x i64> poison, i64 %indvars.iv45.i, i64 0
+ %broadcast.splat = shufflevector <vscale x 4 x i64> %broadcast.splatinsert, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %vector.ph
+ %lsr.iv3 = phi i64 [ %lsr.iv.next4, %vector.body ], [ %lsr.iv1, %vector.ph ]
+ %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2800, %vector.ph ]
+ %vec.ind = phi <vscale x 4 x i64> [ zeroinitializer, %vector.ph ], [ %vec.ind.next.6, %vector.body ]
+ %1 = mul nuw nsw <vscale x 4 x i64> %vec.ind, %broadcast.splat
+ %2 = trunc <vscale x 4 x i64> %1 to <vscale x 4 x i32>
+ %3 = urem <vscale x 4 x i32> %2, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %4 = add nuw nsw <vscale x 4 x i32> %3, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %5 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep16 = getelementptr i8, ptr %0, i64 %5
+ %6 = add nuw nsw <vscale x 4 x i64> %vec.ind, %broadcast.splat
+ %7 = trunc <vscale x 4 x i64> %6 to <vscale x 4 x i32>
+ %8 = urem <vscale x 4 x i32> %7, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %9 = icmp eq <vscale x 4 x i32> %8, zeroinitializer
+ %10 = urem <vscale x 4 x i32> %7, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %11 = icmp eq <vscale x 4 x i32> %10, zeroinitializer
+ %12 = or <vscale x 4 x i1> %9, %11
+ %13 = urem <vscale x 4 x i32> %7, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %14 = icmp eq <vscale x 4 x i32> %13, zeroinitializer
+ %15 = or <vscale x 4 x i1> %14, %12
+ %16 = select <vscale x 4 x i1> %15, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %4
+ store <vscale x 4 x i32> %16, ptr %scevgep16, align 4
+ %vec.ind.next = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 4, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %17 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next, %broadcast.splat
+ %18 = trunc <vscale x 4 x i64> %17 to <vscale x 4 x i32>
+ %19 = urem <vscale x 4 x i32> %18, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %20 = add nuw nsw <vscale x 4 x i32> %19, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %21 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep14 = getelementptr i8, ptr %0, i64 %21
+ %scevgep15 = getelementptr i8, ptr %scevgep14, i64 16
+ %22 = add nuw nsw <vscale x 4 x i64> %vec.ind.next, %broadcast.splat
+ %23 = trunc <vscale x 4 x i64> %22 to <vscale x 4 x i32>
+ %24 = urem <vscale x 4 x i32> %23, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %25 = icmp eq <vscale x 4 x i32> %24, zeroinitializer
+ %26 = urem <vscale x 4 x i32> %23, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %27 = icmp eq <vscale x 4 x i32> %26, zeroinitializer
+ %28 = or <vscale x 4 x i1> %25, %27
+ %29 = urem <vscale x 4 x i32> %23, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %30 = icmp eq <vscale x 4 x i32> %29, zeroinitializer
+ %31 = or <vscale x 4 x i1> %30, %28
+ %32 = select <vscale x 4 x i1> %31, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %20
+ store <vscale x 4 x i32> %32, ptr %scevgep15, align 4
+ %vec.ind.next.1 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 8, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %33 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next.1, %broadcast.splat
+ %34 = trunc <vscale x 4 x i64> %33 to <vscale x 4 x i32>
+ %35 = urem <vscale x 4 x i32> %34, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %36 = add nuw nsw <vscale x 4 x i32> %35, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %37 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep12 = getelementptr i8, ptr %0, i64 %37
+ %scevgep13 = getelementptr i8, ptr %scevgep12, i64 32
+ %38 = add nuw nsw <vscale x 4 x i64> %vec.ind.next.1, %broadcast.splat
+ %39 = trunc <vscale x 4 x i64> %38 to <vscale x 4 x i32>
+ %40 = urem <vscale x 4 x i32> %39, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %41 = icmp eq <vscale x 4 x i32> %40, zeroinitializer
+ %42 = urem <vscale x 4 x i32> %39, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %43 = icmp eq <vscale x 4 x i32> %42, zeroinitializer
+ %44 = or <vscale x 4 x i1> %41, %43
+ %45 = urem <vscale x 4 x i32> %39, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %46 = icmp eq <vscale x 4 x i32> %45, zeroinitializer
+ %47 = or <vscale x 4 x i1> %46, %44
+ %48 = select <vscale x 4 x i1> %47, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %36
+ store <vscale x 4 x i32> %48, ptr %scevgep13, align 4
+ %vec.ind.next.2 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 12, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %49 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next.2, %broadcast.splat
+ %50 = trunc <vscale x 4 x i64> %49 to <vscale x 4 x i32>
+ %51 = urem <vscale x 4 x i32> %50, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %52 = add nuw nsw <vscale x 4 x i32> %51, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %53 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep10 = getelementptr i8, ptr %0, i64 %53
+ %scevgep11 = getelementptr i8, ptr %scevgep10, i64 48
+ %54 = add nuw nsw <vscale x 4 x i64> %vec.ind.next.2, %broadcast.splat
+ %55 = trunc <vscale x 4 x i64> %54 to <vscale x 4 x i32>
+ %56 = urem <vscale x 4 x i32> %55, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %57 = icmp eq <vscale x 4 x i32> %56, zeroinitializer
+ %58 = urem <vscale x 4 x i32> %55, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %59 = icmp eq <vscale x 4 x i32> %58, zeroinitializer
+ %60 = or <vscale x 4 x i1> %57, %59
+ %61 = urem <vscale x 4 x i32> %55, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %62 = icmp eq <vscale x 4 x i32> %61, zeroinitializer
+ %63 = or <vscale x 4 x i1> %62, %60
+ %64 = select <vscale x 4 x i1> %63, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %52
+ store <vscale x 4 x i32> %64, ptr %scevgep11, align 4
+ %vec.ind.next.3 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 16, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %65 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next.3, %broadcast.splat
+ %66 = trunc <vscale x 4 x i64> %65 to <vscale x 4 x i32>
+ %67 = urem <vscale x 4 x i32> %66, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %68 = add nuw nsw <vscale x 4 x i32> %67, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %69 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep8 = getelementptr i8, ptr %0, i64 %69
+ %scevgep9 = getelementptr i8, ptr %scevgep8, i64 64
+ %70 = add nuw nsw <vscale x 4 x i64> %vec.ind.next.3, %broadcast.splat
+ %71 = trunc <vscale x 4 x i64> %70 to <vscale x 4 x i32>
+ %72 = urem <vscale x 4 x i32> %71, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %73 = icmp eq <vscale x 4 x i32> %72, zeroinitializer
+ %74 = urem <vscale x 4 x i32> %71, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %75 = icmp eq <vscale x 4 x i32> %74, zeroinitializer
+ %76 = or <vscale x 4 x i1> %73, %75
+ %77 = urem <vscale x 4 x i32> %71, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %78 = icmp eq <vscale x 4 x i32> %77, zeroinitializer
+ %79 = or <vscale x 4 x i1> %78, %76
+ %80 = select <vscale x 4 x i1> %79, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %68
+ store <vscale x 4 x i32> %80, ptr %scevgep9, align 4
+ %vec.ind.next.4 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 20, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %81 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next.4, %broadcast.splat
+ %82 = trunc <vscale x 4 x i64> %81 to <vscale x 4 x i32>
+ %83 = urem <vscale x 4 x i32> %82, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %84 = add nuw nsw <vscale x 4 x i32> %83, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %85 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep6 = getelementptr i8, ptr %0, i64 %85
+ %scevgep7 = getelementptr i8, ptr %scevgep6, i64 80
+ %86 = add nuw nsw <vscale x 4 x i64> %vec.ind.next.4, %broadcast.splat
+ %87 = trunc <vscale x 4 x i64> %86 to <vscale x 4 x i32>
+ %88 = urem <vscale x 4 x i32> %87, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %89 = icmp eq <vscale x 4 x i32> %88, zeroinitializer
+ %90 = urem <vscale x 4 x i32> %87, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %91 = icmp eq <vscale x 4 x i32> %90, zeroinitializer
+ %92 = or <vscale x 4 x i1> %89, %91
+ %93 = urem <vscale x 4 x i32> %87, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %94 = icmp eq <vscale x 4 x i32> %93, zeroinitializer
+ %95 = or <vscale x 4 x i1> %94, %92
+ %96 = select <vscale x 4 x i1> %95, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %84
+ store <vscale x 4 x i32> %96, ptr %scevgep7, align 4
+ %vec.ind.next.5 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 24, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %97 = mul nuw nsw <vscale x 4 x i64> %vec.ind.next.5, %broadcast.splat
+ %98 = trunc <vscale x 4 x i64> %97 to <vscale x 4 x i32>
+ %99 = urem <vscale x 4 x i32> %98, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %100 = add nuw nsw <vscale x 4 x i32> %99, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %101 = shl nuw nsw i64 %lsr.iv3, 2
+ %scevgep = getelementptr i8, ptr %0, i64 %101
+ %scevgep5 = getelementptr i8, ptr %scevgep, i64 96
+ %102 = add nuw nsw <vscale x 4 x i64> %vec.ind.next.5, %broadcast.splat
+ %103 = trunc <vscale x 4 x i64> %102 to <vscale x 4 x i32>
+ %104 = urem <vscale x 4 x i32> %103, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 13, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %105 = icmp eq <vscale x 4 x i32> %104, zeroinitializer
+ %106 = urem <vscale x 4 x i32> %103, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 7, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %107 = icmp eq <vscale x 4 x i32> %106, zeroinitializer
+ %108 = or <vscale x 4 x i1> %105, %107
+ %109 = urem <vscale x 4 x i32> %103, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 11, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+ %110 = icmp eq <vscale x 4 x i32> %109, zeroinitializer
+ %111 = or <vscale x 4 x i1> %110, %108
+ %112 = select <vscale x 4 x i1> %111, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 999, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> %100
+ store <vscale x 4 x i32> %112, ptr %scevgep5, align 4
+ %vec.ind.next.6 = add <vscale x 4 x i64> %vec.ind, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 28, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+ %lsr.iv.next = add nsw i64 %lsr.iv, -28
+ %lsr.iv.next4 = add nuw nsw i64 %lsr.iv3, 28
+ %113 = icmp eq i64 %lsr.iv.next, 0
+ br i1 %113, label %for.inc20.i, label %vector.body
+
+ for.inc20.i: ; preds = %vector.body
+ %indvars.iv.next46.i = add nuw nsw i64 %indvars.iv45.i, 1
+ %lsr.iv.next2 = add nuw nsw i64 %lsr.iv1, 2800
+ %exitcond48.not.i = icmp eq i64 %indvars.iv.next46.i, 2800
+ br i1 %exitcond48.not.i, label %init_array.exit, label %vector.ph
+
+ init_array.exit: ; preds = %for.inc20.i
+ call void @free(ptr noundef nonnull %0)
+ ret i32 0
+ }
+
+ attributes #0 = { mustprogress nounwind willreturn allockind("free") memory(argmem: readwrite, inaccessiblemem: readwrite) "alloc-family"="malloc" "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="a64fx" "target-features"="+aes,+complxnum,+crc,+fp-armv8,+fullfp16,+lse,+neon,+outline-atomics,+perfmon,+ras,+rdm,+sha2,+sve,+v8.1a,+v8.2a,+v8a,-fmv" }
+ attributes #1 = { nounwind uwtable vscale_range(1,1) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="a64fx" "target-features"="+aes,+complxnum,+crc,+fp-armv8,+fullfp16,+lse,+neon,+outline-atomics,+perfmon,+ras,+rdm,+sha2,+sve,+v8.1a,+v8.2a,+v8a,-fmv" }
+
+...
+---
+name: main
+tracksRegLiveness: true
+stack:
+ - { id: 0, name: ret.i.i55, size: 8, alignment: 8, local-offset: -8 }
+ - { id: 1, name: ret.i.i, size: 8, alignment: 8, local-offset: -16 }
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ %18:gpr64all = COPY $xzr
+ %17:gpr64all = COPY %18
+ %19:gpr64common = LDRXui %stack.1.ret.i.i, 0 :: (dereferenceable load (s64) from %ir.ret.i.i)
+ %0:gpr64common = COPY %19
+ %21:zpr = DUP_ZI_D 0, 0, implicit $vg
+ %23:gpr32 = MOVi32imm 2800
+ %24:ppr_3b = PTRUE_D 31, implicit $vg
+ %28:ppr_3b = PTRUE_S 31, implicit $vg
+ %29:gpr32common = MOVi32imm 613566757
+ %30:zpr = DUP_ZR_S %29
+ %36:zpr = DUP_ZI_S 7, 0, implicit $vg
+ %43:gpr32common = MOVi32imm -991146299
+ %44:zpr = DUP_ZR_S %43
+ %46:gpr32common = MOVi32imm 330382099
+ %47:zpr = DUP_ZR_S %46
+ %49:gpr32common = MOVi32imm -1227133513
+ %50:zpr = DUP_ZR_S %49
+ %52:gpr32common = MOVi32imm 613566756
+ %53:zpr = DUP_ZR_S %52
+ %56:gpr32common = MOVi32imm -1171354717
+ %57:zpr = DUP_ZR_S %56
+ %59:gpr32common = MOVi32imm 390451572
+ %60:zpr = DUP_ZR_S %59
+ %63:gpr32common = MOVi32imm 999
+ %64:zpr = DUP_ZR_S %63
+ %79:gpr64common = MOVi64imm 4
+ %104:gpr64common = MOVi64imm 8
+ %129:gpr64common = MOVi64imm 12
+ %154:gpr64common = MOVi64imm 16
+ %179:gpr64common = MOVi64imm 20
+ %204:gpr64common = MOVi64imm 24
+
+ bb.1.vector.ph:
+ %1:gpr64sp = PHI %17, %bb.0, %14, %bb.3
+ %2:gpr64sp = PHI %17, %bb.0, %13, %bb.3
+ %4:zpr = DUP_ZR_D %2
+ %22:zpr = COPY %21
+ %20:gpr64all = SUBREG_TO_REG 0, %23, %subreg.sub_32
+
+ bb.2.vector.body:
+ successors: %bb.3(0x04000000), %bb.2(0x7c000000)
+
+ %5:gpr64common = PHI %1, %bb.1, %12, %bb.2
+ %6:gpr64sp = PHI %20, %bb.1, %11, %bb.2
+ %7:zpr = PHI %21, %bb.1, %9, %bb.2
+ %8:zpr = PHI %22, %bb.1, %10, %bb.2
+ %25:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %7, %4
+ %26:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %8, %4
+ %27:zpr = UZP1_ZZZ_S killed %25, killed %26
+ %31:zpr = UMULH_ZPZZ_S_UNDEF %28, %27, %30
+ %32:zpr = SUB_ZZZ_S %27, %31
+ %33:zpr = LSR_ZZI_S killed %32, 1
+ %34:zpr = ADD_ZZZ_S killed %33, %31
+ %35:zpr = LSR_ZZI_S killed %34, 2
+ %37:zpr = MLS_ZPZZZ_S_UNDEF %28, %27, killed %35, %36
+ %38:zpr = nuw nsw ADD_ZI_S %37, 1, 0
+ %39:gpr64common = ADDXrs %19, %5, 2
+ %40:zpr = nuw nsw ADD_ZZZ_D %7, %4
+ %41:zpr = nuw nsw ADD_ZZZ_D %8, %4
+ %42:zpr = UZP1_ZZZ_S killed %40, killed %41
+ %45:zpr = MUL_ZPZZ_S_UNDEF %28, %42, %44
+ %48:ppr = CMPHS_PPzZZ_S %28, %47, killed %45, implicit-def dead $nzcv
+ %51:zpr = MUL_ZPZZ_S_UNDEF %28, %42, %50
+ %54:ppr = CMPHS_PPzZZ_S %28, %53, killed %51, implicit-def dead $nzcv
+ %55:ppr = SEL_PPPP %48, %48, killed %54
+ %58:zpr = MUL_ZPZZ_S_UNDEF %28, %42, %57
+ %61:ppr = CMPHS_PPzZZ_S %28, %60, killed %58, implicit-def dead $nzcv
+ %62:ppr = SEL_PPPP %61, %61, killed %55
+ %65:zpr = SEL_ZPZZ_S killed %62, %64, killed %38
+ ST1W killed %65, %28, %0, %5 :: (store (<vscale x 1 x s128>) into %ir.scevgep16, align 4)
+ %67:zpr = ADD_ZI_D %8, 4, 0
+ %68:zpr = ADD_ZI_D %7, 4, 0
+ %69:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %68, %4
+ %70:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %67, %4
+ %71:zpr = UZP1_ZZZ_S killed %69, killed %70
+ %72:zpr = UMULH_ZPZZ_S_UNDEF %28, %71, %30
+ %73:zpr = SUB_ZZZ_S %71, %72
+ %74:zpr = LSR_ZZI_S killed %73, 1
+ %75:zpr = ADD_ZZZ_S killed %74, %72
+ %76:zpr = LSR_ZZI_S killed %75, 2
+ %77:zpr = MLS_ZPZZZ_S_UNDEF %28, %71, killed %76, %36
+ %78:zpr = nuw nsw ADD_ZI_S %77, 1, 0
+ %80:zpr = nuw nsw ADD_ZZZ_D %68, %4
+ %81:zpr = nuw nsw ADD_ZZZ_D %67, %4
+ %82:zpr = UZP1_ZZZ_S killed %80, killed %81
+ %83:zpr = MUL_ZPZZ_S_UNDEF %28, %82, %44
+ %84:ppr = CMPHS_PPzZZ_S %28, %47, killed %83, implicit-def dead $nzcv
+ %85:zpr = MUL_ZPZZ_S_UNDEF %28, %82, %50
+ %86:ppr = CMPHS_PPzZZ_S %28, %53, killed %85, implicit-def dead $nzcv
+ %87:ppr = SEL_PPPP %84, %84, killed %86
+ %88:zpr = MUL_ZPZZ_S_UNDEF %28, %82, %57
+ %89:ppr = CMPHS_PPzZZ_S %28, %60, killed %88, implicit-def dead $nzcv
+ %90:ppr = SEL_PPPP %89, %89, killed %87
+ %91:zpr = SEL_ZPZZ_S killed %90, %64, killed %78
+ ST1W killed %91, %28, %39, %79 :: (store (<vscale x 1 x s128>) into %ir.scevgep15, align 4)
+ %92:zpr = ADD_ZI_D %8, 8, 0
+ %93:zpr = ADD_ZI_D %7, 8, 0
+ %94:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %93, %4
+ %95:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %92, %4
+ %96:zpr = UZP1_ZZZ_S killed %94, killed %95
+ %97:zpr = UMULH_ZPZZ_S_UNDEF %28, %96, %30
+ %98:zpr = SUB_ZZZ_S %96, %97
+ %99:zpr = LSR_ZZI_S killed %98, 1
+ %100:zpr = ADD_ZZZ_S killed %99, %97
+ %101:zpr = LSR_ZZI_S killed %100, 2
+ %102:zpr = MLS_ZPZZZ_S_UNDEF %28, %96, killed %101, %36
+ %103:zpr = nuw nsw ADD_ZI_S %102, 1, 0
+ %105:zpr = nuw nsw ADD_ZZZ_D %93, %4
+ %106:zpr = nuw nsw ADD_ZZZ_D %92, %4
+ %107:zpr = UZP1_ZZZ_S killed %105, killed %106
+ %108:zpr = MUL_ZPZZ_S_UNDEF %28, %107, %44
+ %109:ppr = CMPHS_PPzZZ_S %28, %47, killed %108, implicit-def dead $nzcv
+ %110:zpr = MUL_ZPZZ_S_UNDEF %28, %107, %50
+ %111:ppr = CMPHS_PPzZZ_S %28, %53, killed %110, implicit-def dead $nzcv
+ %112:ppr = SEL_PPPP %109, %109, killed %111
+ %113:zpr = MUL_ZPZZ_S_UNDEF %28, %107, %57
+ %114:ppr = CMPHS_PPzZZ_S %28, %60, killed %113, implicit-def dead $nzcv
+ %115:ppr = SEL_PPPP %114, %114, killed %112
+ %116:zpr = SEL_ZPZZ_S killed %115, %64, killed %103
+ ST1W killed %116, %28, %39, %104 :: (store (<vscale x 1 x s128>) into %ir.scevgep13, align 4)
+ %117:zpr = ADD_ZI_D %8, 12, 0
+ %118:zpr = ADD_ZI_D %7, 12, 0
+ %119:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %118, %4
+ %120:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %117, %4
+ %121:zpr = UZP1_ZZZ_S killed %119, killed %120
+ %122:zpr = UMULH_ZPZZ_S_UNDEF %28, %121, %30
+ %123:zpr = SUB_ZZZ_S %121, %122
+ %124:zpr = LSR_ZZI_S killed %123, 1
+ %125:zpr = ADD_ZZZ_S killed %124, %122
+ %126:zpr = LSR_ZZI_S killed %125, 2
+ %127:zpr = MLS_ZPZZZ_S_UNDEF %28, %121, killed %126, %36
+ %128:zpr = nuw nsw ADD_ZI_S %127, 1, 0
+ %130:zpr = nuw nsw ADD_ZZZ_D %118, %4
+ %131:zpr = nuw nsw ADD_ZZZ_D %117, %4
+ %132:zpr = UZP1_ZZZ_S killed %130, killed %131
+ %133:zpr = MUL_ZPZZ_S_UNDEF %28, %132, %44
+ %134:ppr = CMPHS_PPzZZ_S %28, %47, killed %133, implicit-def dead $nzcv
+ %135:zpr = MUL_ZPZZ_S_UNDEF %28, %132, %50
+ %136:ppr = CMPHS_PPzZZ_S %28, %53, killed %135, implicit-def dead $nzcv
+ %137:ppr = SEL_PPPP %134, %134, killed %136
+ %138:zpr = MUL_ZPZZ_S_UNDEF %28, %132, %57
+ %139:ppr = CMPHS_PPzZZ_S %28, %60, killed %138, implicit-def dead $nzcv
+ %140:ppr = SEL_PPPP %139, %139, killed %137
+ %141:zpr = SEL_ZPZZ_S killed %140, %64, killed %128
+ ST1W killed %141, %28, %39, %129 :: (store (<vscale x 1 x s128>) into %ir.scevgep11, align 4)
+ %142:zpr = ADD_ZI_D %8, 16, 0
+ %143:zpr = ADD_ZI_D %7, 16, 0
+ %144:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %143, %4
+ %145:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %142, %4
+ %146:zpr = UZP1_ZZZ_S killed %144, killed %145
+ %147:zpr = UMULH_ZPZZ_S_UNDEF %28, %146, %30
+ %148:zpr = SUB_ZZZ_S %146, %147
+ %149:zpr = LSR_ZZI_S killed %148, 1
+ %150:zpr = ADD_ZZZ_S killed %149, %147
+ %151:zpr = LSR_ZZI_S killed %150, 2
+ %152:zpr = MLS_ZPZZZ_S_UNDEF %28, %146, killed %151, %36
+ %153:zpr = nuw nsw ADD_ZI_S %152, 1, 0
+ %155:zpr = nuw nsw ADD_ZZZ_D %143, %4
+ %156:zpr = nuw nsw ADD_ZZZ_D %142, %4
+ %157:zpr = UZP1_ZZZ_S killed %155, killed %156
+ %158:zpr = MUL_ZPZZ_S_UNDEF %28, %157, %44
+ %159:ppr = CMPHS_PPzZZ_S %28, %47, killed %158, implicit-def dead $nzcv
+ %160:zpr = MUL_ZPZZ_S_UNDEF %28, %157, %50
+ %161:ppr = CMPHS_PPzZZ_S %28, %53, killed %160, implicit-def dead $nzcv
+ %162:ppr = SEL_PPPP %159, %159, killed %161
+ %163:zpr = MUL_ZPZZ_S_UNDEF %28, %157, %57
+ %164:ppr = CMPHS_PPzZZ_S %28, %60, killed %163, implicit-def dead $nzcv
+ %165:ppr = SEL_PPPP %164, %164, killed %162
+ %166:zpr = SEL_ZPZZ_S killed %165, %64, killed %153
+ ST1W killed %166, %28, %39, %154 :: (store (<vscale x 1 x s128>) into %ir.scevgep9, align 4)
+ %167:zpr = ADD_ZI_D %8, 20, 0
+ %168:zpr = ADD_ZI_D %7, 20, 0
+ %169:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %168, %4
+ %170:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %167, %4
+ %171:zpr = UZP1_ZZZ_S killed %169, killed %170
+ %172:zpr = UMULH_ZPZZ_S_UNDEF %28, %171, %30
+ %173:zpr = SUB_ZZZ_S %171, %172
+ %174:zpr = LSR_ZZI_S killed %173, 1
+ %175:zpr = ADD_ZZZ_S killed %174, %172
+ %176:zpr = LSR_ZZI_S killed %175, 2
+ %177:zpr = MLS_ZPZZZ_S_UNDEF %28, %171, killed %176, %36
+ %178:zpr = nuw nsw ADD_ZI_S %177, 1, 0
+ %180:zpr = nuw nsw ADD_ZZZ_D %168, %4
+ %181:zpr = nuw nsw ADD_ZZZ_D %167, %4
+ %182:zpr = UZP1_ZZZ_S killed %180, killed %181
+ %183:zpr = MUL_ZPZZ_S_UNDEF %28, %182, %44
+ %184:ppr = CMPHS_PPzZZ_S %28, %47, killed %183, implicit-def dead $nzcv
+ %185:zpr = MUL_ZPZZ_S_UNDEF %28, %182, %50
+ %186:ppr = CMPHS_PPzZZ_S %28, %53, killed %185, implicit-def dead $nzcv
+ %187:ppr = SEL_PPPP %184, %184, killed %186
+ %188:zpr = MUL_ZPZZ_S_UNDEF %28, %182, %57
+ %189:ppr = CMPHS_PPzZZ_S %28, %60, killed %188, implicit-def dead $nzcv
+ %190:ppr = SEL_PPPP %189, %189, killed %187
+ %191:zpr = SEL_ZPZZ_S killed %190, %64, killed %178
+ ST1W killed %191, %28, %39, %179 :: (store (<vscale x 1 x s128>) into %ir.scevgep7, align 4)
+ %192:zpr = ADD_ZI_D %8, 24, 0
+ %193:zpr = ADD_ZI_D %7, 24, 0
+ %194:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %193, %4
+ %195:zpr = nuw nsw MUL_ZPZZ_D_UNDEF %24, %192, %4
+ %196:zpr = UZP1_ZZZ_S killed %194, killed %195
+ %197:zpr = UMULH_ZPZZ_S_UNDEF %28, %196, %30
+ %198:zpr = SUB_ZZZ_S %196, %197
+ %199:zpr = LSR_ZZI_S killed %198, 1
+ %200:zpr = ADD_ZZZ_S killed %199, %197
+ %201:zpr = LSR_ZZI_S killed %200, 2
+ %202:zpr = MLS_ZPZZZ_S_UNDEF %28, %196, killed %201, %36
+ %203:zpr = nuw nsw ADD_ZI_S %202, 1, 0
+ %205:zpr = nuw nsw ADD_ZZZ_D %193, %4
+ %206:zpr = nuw nsw ADD_ZZZ_D %192, %4
+ %207:zpr = UZP1_ZZZ_S killed %205, killed %206
+ %208:zpr = MUL_ZPZZ_S_UNDEF %28, %207, %44
+ %209:ppr = CMPHS_PPzZZ_S %28, %47, killed %208, implicit-def dead $nzcv
+ %210:zpr = MUL_ZPZZ_S_UNDEF %28, %207, %50
+ %211:ppr = CMPHS_PPzZZ_S %28, %53, killed %210, implicit-def dead $nzcv
+ %212:ppr = SEL_PPPP %209, %209, killed %211
+ %213:zpr = MUL_ZPZZ_S_UNDEF %28, %207, %57
+ %214:ppr = CMPHS_PPzZZ_S %28, %60, killed %213, implicit-def dead $nzcv
+ %215:ppr = SEL_PPPP %214, %214, killed %212
+ %216:zpr = SEL_ZPZZ_S killed %215, %64, killed %203
+ ST1W killed %216, %28, %39, %204 :: (store (<vscale x 1 x s128>) into %ir.scevgep5, align 4)
+ %9:zpr = ADD_ZI_D %7, 28, 0
+ %10:zpr = ADD_ZI_D %8, 28, 0
+ %217:gpr64 = nsw SUBSXri %6, 28, 0, implicit-def $nzcv
+ %11:gpr64all = COPY %217
+ %218:gpr64sp = nuw nsw ADDXri %5, 28, 0
+ %12:gpr64all = COPY %218
+ Bcc 1, %bb.2, implicit $nzcv
+ B %bb.3
+
+ bb.3.for.inc20.i:
+ successors: %bb.4(0x04000000), %bb.1(0x7c000000)
+
+ %219:gpr64sp = nuw nsw ADDXri %2, 1, 0
+ %13:gpr64all = COPY %219
+ %220:gpr64sp = nuw nsw ADDXri %1, 2800, 0
+ %14:gpr64all = COPY %220
+ dead $xzr = SUBSXri %219, 2800, 0, implicit-def $nzcv
+ Bcc 1, %bb.1, implicit $nzcv
+ B %bb.4
+
+ bb.4.init_array.exit:
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ $x0 = COPY %0
+ BL @free, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ %222:gpr32all = COPY $wzr
+ $w0 = COPY %222
+ RET_ReallyLR implicit $w0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/srem-seteq-vec-splat.ll b/llvm/test/CodeGen/AArch64/srem-seteq-vec-splat.ll
index c0c0ae5..03d40fc 100644
--- a/llvm/test/CodeGen/AArch64/srem-seteq-vec-splat.ll
+++ b/llvm/test/CodeGen/AArch64/srem-seteq-vec-splat.ll
@@ -206,7 +206,7 @@ define <4 x i32> @test_srem_int_min(<4 x i32> %X) nounwind {
; CHECK-NEXT: movi v1.4s, #128, lsl #24
; CHECK-NEXT: usra v3.4s, v2.4s, #1
; CHECK-NEXT: and v1.16b, v3.16b, v1.16b
-; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-NEXT: movi v1.4s, #1
; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
new file mode 100644
index 0000000..0b6bf38
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
@@ -0,0 +1,152 @@
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-PADDING
+
+; Don't emit remarks for non-streaming functions.
+define float @csr_x20_stackargs_notsc(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) {
+; CHECK-NOT: remark: <unknown>:0:0: stack hazard in 'csr_x20_stackargs_notsc':
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'csr_x20_stackargs_notsc':
+entry:
+ tail call void asm sideeffect "", "~{x20}"() #1
+ ret float %i
+}
+
+; Don't emit remarks for functions that only access GPR stack objects.
+define i64 @stackargs_gpr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h, i64 %i) #2 {
+; CHECK-NOT: remark: <unknown>:0:0: stack hazard in 'stackargs_gpr':
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'stackargs_gpr':
+entry:
+ ret i64 %i
+}
+
+; Don't emit remarks for functions that only access FPR stack objects.
+define double @stackargs_fpr(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) #2 {
+; CHECK-NOT: remark: <unknown>:0:0: stack hazard in 'stackargs_fpr':
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'stackargs_fpr':
+entry:
+ ret double %i
+}
+
+; As this case is handled by addition of stack hazard padding, only emit remarks when this is not switched on.
+define i32 @csr_d8_alloci64(i64 %d) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'csr_d8_alloci64': FPR stack object at [SP-16] is too close to GPR stack object at [SP-8]
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'csr_d8_alloci64':
+entry:
+ %a = alloca i64
+ tail call void asm sideeffect "", "~{d8}"() #1
+ store i64 %d, ptr %a
+ ret i32 0
+}
+
+; As this case is handled by addition of stack hazard padding, only emit remarks when this is not switched on.
+define i32 @csr_d8_allocnxv4i32(i64 %d) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32': FPR stack object at [SP-16] is too close to GPR stack object at [SP-8]
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32':
+entry:
+ %a = alloca <vscale x 4 x i32>
+ tail call void asm sideeffect "", "~{d8}"() #1
+ store <vscale x 4 x i32> zeroinitializer, ptr %a
+ ret i32 0
+}
+
+define float @csr_x20_stackargs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'csr_x20_stackargs': GPR stack object at [SP-16] is too close to FPR stack object at [SP+0]
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'csr_x20_stackargs': GPR stack object at [SP-16] is too close to FPR stack object at [SP+0]
+entry:
+ tail call void asm sideeffect "", "~{x20}"() #1
+ ret float %i
+}
+
+; In this case, addition of stack hazard padding triggers an x29 (fp) spill, so a hazard occurs between the FPR argument and the GPR spill.
+define float @csr_d8_stackargs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) #2 {
+; CHECK-NOT: remark: <unknown>:0:0: stack hazard in 'csr_d8_stackargs':
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'csr_d8_stackargs': GPR stack object at [SP-8] is too close to FPR stack object at [SP+0]
+entry:
+ tail call void asm sideeffect "", "~{d8}"() #1
+ ret float %i
+}
+
+; SVE calling conventions
+; Predicate register spills end up in FP region, currently.
+
+define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-48-258 * vscale] is too close to FPR stack object at [SP-48-256 * vscale]
+; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-48-16 * vscale] is too close to GPR stack object at [SP-48]
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-1072-258 * vscale] is too close to FPR stack object at [SP-1072-256 * vscale]
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call':
+entry:
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
+ ret i32 -396142473
+}
+
+define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-48-258 * vscale] is too close to FPR stack object at [SP-48-256 * vscale]
+; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-48-16 * vscale] is too close to GPR stack object at [SP-48]
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-1072-258 * vscale] is too close to FPR stack object at [SP-1072-256 * vscale]
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call':
+entry:
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %0 = alloca [37 x i8], align 16
+ %call = call ptr @memset(ptr noundef nonnull %0, i32 noundef 45, i32 noundef 37)
+ ret i32 -396142473
+}
+declare ptr @memset(ptr, i32, i32)
+
+%struct.mixed_struct = type { i32, float }
+
+define i32 @mixed_stack_object(i32 %a, float %b) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'mixed_stack_object': Mixed stack object at [SP-8] accessed by both GP and FP instructions
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'mixed_stack_object': Mixed stack object at [SP-8] accessed by both GP and FP instructions
+entry:
+ %s = alloca %struct.mixed_struct
+ %s.i = getelementptr %struct.mixed_struct, ptr %s, i32 0, i32 0
+ %s.f = getelementptr %struct.mixed_struct, ptr %s, i32 0, i32 1
+ store i32 %a, ptr %s.i
+ store float %b, ptr %s.f
+ ret i32 %a
+}
+
+define i32 @mixed_stack_objects(i32 %a, float %b) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-16] is too close to Mixed stack object at [SP-8]
+; CHECK: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-16] accessed by both GP and FP instructions
+; CHECK: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-8] accessed by both GP and FP instructions
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-16] is too close to Mixed stack object at [SP-8]
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-16] accessed by both GP and FP instructions
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'mixed_stack_objects': Mixed stack object at [SP-8] accessed by both GP and FP instructions
+entry:
+ %s0 = alloca %struct.mixed_struct
+ %s0.i = getelementptr %struct.mixed_struct, ptr %s0, i32 0, i32 0
+ %s0.f = getelementptr %struct.mixed_struct, ptr %s0, i32 0, i32 1
+ store i32 %a, ptr %s0.i
+ store float %b, ptr %s0.f
+
+ %s1 = alloca %struct.mixed_struct
+ %s1.i = getelementptr %struct.mixed_struct, ptr %s1, i32 0, i32 0
+ %s1.f = getelementptr %struct.mixed_struct, ptr %s1, i32 0, i32 1
+ store i32 %a, ptr %s1.i
+ store float %b, ptr %s1.f
+
+ ret i32 %a
+}
+
+; VLA-area stack objects are not separated.
+define i32 @csr_d8_allocnxv4i32i32f64_vlai32f64(double %d, i32 %i) #2 {
+; CHECK: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32i32f64_vlai32f64': GPR stack object at [SP-48-16 * vscale] is too close to FPR stack object at [SP-48-16 * vscale]
+; CHECK: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32i32f64_vlai32f64': FPR stack object at [SP-32] is too close to GPR stack object at [SP-24]
+; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32i32f64_vlai32f64': GPR stack object at [SP-2096-16 * vscale] is too close to FPR stack object at [SP-2096-16 * vscale]
+; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'csr_d8_allocnxv4i32i32f64_vlai32f64':
+entry:
+ %a = alloca <vscale x 4 x i32>
+ %0 = zext i32 %i to i64
+ %vla0 = alloca i32, i64 %0
+ %vla1 = alloca double, i64 %0
+ %c = alloca double
+ tail call void asm sideeffect "", "~{d8}"() #1
+ store <vscale x 4 x i32> zeroinitializer, ptr %a
+ store i32 zeroinitializer, ptr %vla0
+ store double %d, ptr %vla1
+ store double %d, ptr %c
+ ret i32 0
+}
+
+attributes #2 = { "aarch64_pstate_sm_compatible" }
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
index 2541910..adbdee0 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
@@ -1505,9 +1505,9 @@ define <vscale x 2 x i64> @sub_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK-LABEL: sub_nxv2i64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sub z0.d, z0.d, z1.d
; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
-; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT: subr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -1520,9 +1520,9 @@ define <vscale x 4 x i32> @sub_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK-LABEL: sub_nxv4i32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: sub z0.s, z0.s, z1.s
; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
-; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT: subr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -1535,9 +1535,9 @@ define <vscale x 8 x i16> @sub_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK-LABEL: sub_nxv8i16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: sub z0.h, z0.h, z1.h
; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
-; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT: subr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -1550,9 +1550,9 @@ define <vscale x 16 x i8> @sub_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK-LABEL: sub_nxv16i8_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: sub z0.b, z0.b, z1.b
; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
-; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b
+; CHECK-NEXT: subr z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -2517,10 +2517,10 @@ define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
; CHECK-LABEL: fsub_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: fsub z0.s, z0.s, z1.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT: fsubr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
@@ -2533,10 +2533,10 @@ define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
; CHECK-LABEL: fsub_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: fsub z0.h, z0.h, z1.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT: fsubr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
@@ -2549,10 +2549,10 @@ define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
; CHECK-LABEL: fsub_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: fsub z0.d, z0.d, z1.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT: fsubr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
index bafd5ab..6607f9c 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
@@ -932,9 +932,9 @@ define <vscale x 2 x i64> @sub_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK-LABEL: sub_nxv2i64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sub z0.d, z0.d, z1.d
; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
-; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT: subr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
@@ -947,9 +947,9 @@ define <vscale x 4 x i32> @sub_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK-LABEL: sub_nxv4i32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: sub z0.s, z0.s, z1.s
; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
-; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT: subr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
@@ -962,9 +962,9 @@ define <vscale x 8 x i16> @sub_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK-LABEL: sub_nxv8i16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: sub z0.h, z0.h, z1.h
; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
-; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT: subr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
@@ -977,9 +977,9 @@ define <vscale x 16 x i8> @sub_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK-LABEL: sub_nxv16i8_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: sub z0.b, z0.b, z1.b
; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
-; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b
+; CHECK-NEXT: subr z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
@@ -1588,10 +1588,10 @@ define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
; CHECK-LABEL: fsub_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: fsub z0.s, z0.s, z1.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT: fsubr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
@@ -1604,10 +1604,10 @@ define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
; CHECK-LABEL: fsub_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: fsub z0.h, z0.h, z1.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT: fsubr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
@@ -1620,10 +1620,10 @@ define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
; CHECK-LABEL: fsub_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: fsub z0.d, z0.d, z1.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
-; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT: fsubr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
index 431c9dc..ec94198 100644
--- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
@@ -150,8 +150,8 @@ entry:
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40-16 x vscale], Type: Variable, Align: 8, Size: 8
-; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: VariableSized, Align: 1, Size: 0
-; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: VariableSized, Align: 1, Size: 0
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0
define i32 @csr_d8_allocnxv4i32i32f64_vla(double %d, i32 %i) "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: csr_d8_allocnxv4i32i32f64_vla:
diff --git a/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll b/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll
index 365fd53..d5fda04 100644
--- a/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 < %s | FileCheck %s
; Replace pattern min(max(v1,v2),v3) by clamp
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
index 221bb3b..7b921d7 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfadd_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfclamp.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfclamp.ll
index 7934f83..baadd08 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfclamp.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfclamp.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sme2 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sme2 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfclamp(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfclamp:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
index 24c4fed..55ef452 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfmax_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
index 25fe9cf7..9b0f7e0 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfmaxnm_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
index d5b0b8b..8c586fd 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfmin_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
index c019dc7..9013222 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfminnm_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
index 02b1db1..eb7e99f 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfmla_m(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_m:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla_lane.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla_lane.ll
index d0e3a82..ece96b3 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla_lane.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla_lane.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfmla_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_lane_idx1:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
index 987fe1f..8ff1afc 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfmls_m(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_m:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls_lane.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls_lane.ll
index 16b4538..81406bf 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls_lane.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls_lane.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfmls_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_lane_idx1:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
index a04c5a5..8b6a087 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfmul_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul_lane.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul_lane.ll
index 2962d59..28ae9b0 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul_lane.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul_lane.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -verify-machineinstrs < %s | FileCheck %s
define <vscale x 8 x bfloat> @bfmul_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_lane_idx1:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
index 752b5ae..1b13043 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+sve-b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define <vscale x 8 x bfloat> @bfsub_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
index b9846a6..b2b4331 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
@@ -441,6 +441,43 @@ define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_ext_fma_mul_rhs(<4 x float> %
ret <4 x float> %d
}
+define amdgpu_ps float @test_matching_source_from_unmerge(ptr addrspace(3) %aptr, float %b) {
+; GFX9-DENORM-LABEL: test_matching_source_from_unmerge:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: ds_read_b64 v[2:3], v0
+; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DENORM-NEXT: v_mad_mix_f32 v0, v2, v3, v1 op_sel:[1,1,0] op_sel_hi:[1,1,0]
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_matching_source_from_unmerge:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: ds_read_b64 v[2:3], v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: v_fma_mix_f32 v0, v2, v3, v1 op_sel:[1,1,0] op_sel_hi:[1,1,0]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_matching_source_from_unmerge:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: ds_read_b64 v[2:3], v0
+; GFX10-CONTRACT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CONTRACT-NEXT: v_fma_mix_f32 v0, v2, v3, v1 op_sel:[1,1,0] op_sel_hi:[1,1,0]
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_matching_source_from_unmerge:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: ds_read_b64 v[2:3], v0
+; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-DENORM-NEXT: v_fma_mix_f32 v0, v2, v3, v1 op_sel:[1,1,0] op_sel_hi:[1,1,0]
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = load <4 x half>, ptr addrspace(3) %aptr, align 16
+ %a_f32 = fpext <4 x half> %a to <4 x float>
+ %.a3_f32 = extractelement <4 x float> %a_f32, i64 3
+ %.a1_f32 = extractelement <4 x float> %a_f32, i64 1
+ %res = call float @llvm.fmuladd.f32(float %.a1_f32, float %.a3_f32, float %b)
+ ret float %res
+}
+
declare float @llvm.fmuladd.f32(float, float, float) #0
declare half @llvm.fmuladd.f16(half, half, half) #0
declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) #0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll
index 9b9249b..66b8823 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll
@@ -7,10 +7,11 @@ target triple = "amdgcn-amd-amdhsa"
; Make sure flat_scratch_init is set
; GCN-LABEL: {{^}}stack_object_addrspacecast_in_kernel_no_calls:
-; RW-FLAT: s_add_u32 flat_scratch_lo, s4, s7
-; RW-FLAT: s_addc_u32 flat_scratch_hi, s5, 0
+; RW-FLAT: s_add_u32 s0, s0, s7
+; RW-FLAT: s_addc_u32 s1, s1, 0
; RO-FLAT-NOT: flat_scratch
-; GCN: flat_store_dword
+; RW-FLAT: buffer_store_dword
+; RO-FLAT: scratch_store_dword
; RO-FLAT-NOT: .amdhsa_user_sgpr_private_segment_buffer
; RW-FLAT: .amdhsa_user_sgpr_flat_scratch_init 1
; RO-FLAT-NOT: .amdhsa_user_sgpr_flat_scratch_init
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-copy.mir
index 48de483..30c374d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-copy.mir
@@ -1,6 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=amdgpu-regbankselect -regbankselect-fast -verify-machineinstrs %s -o - | FileCheck %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=amdgpu-regbankselect -regbankselect-greedy -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-regbankselect -regbankselect-fast -verify-machineinstrs %s -o - | FileCheck --check-prefix=WAVE32 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-regbankselect -regbankselect-greedy -verify-machineinstrs %s -o - | FileCheck --check-prefix=WAVE32 %s
---
name: copy_s32_vgpr_to_vgpr
@@ -201,3 +203,299 @@ body: |
%2:vcc(s1) = COPY %1
S_ENDPGM 0, implicit %2
...
+
+---
+name: wave64_copy_sgpr_64_to_s1
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr4_sgpr5
+ ; CHECK-LABEL: name: wave64_copy_sgpr_64_to_s1
+ ; CHECK: liveins: $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vcc(s1) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY]](s1), [[CONST1]], [[CONST2]]
+ %0:_(s1) = COPY $sgpr4_sgpr5
+ %1:_(s32) = G_ZEXT %0:_(s1)
+...
+
+---
+name: wave32_copy_sgpr_32_to_s1
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0
+ ; WAVE32-LABEL: name: wave32_copy_sgpr_32_to_s1
+ ; WAVE32: liveins: $sgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vcc(s1) = COPY $sgpr0
+ ; WAVE32-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; WAVE32-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; WAVE32-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY]](s1), [[CONST1]], [[CONST2]]
+ %0:_(s1) = COPY $sgpr0
+ %1:_(s32) = G_ZEXT %0:_(s1)
+...
+
+---
+name: wave64_copy2_sgpr_64_to_s1
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr4_sgpr5, $sgpr6_sgpr7
+ ; CHECK-LABEL: name: wave64_copy2_sgpr_64_to_s1
+ ; CHECK: liveins: $sgpr4_sgpr5, $sgpr6_sgpr7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vcc(s1) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vcc(s1) = COPY $sgpr6_sgpr7
+ ; CHECK-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY1]](s1), [[CONST1]], [[CONST2]]
+ ; CHECK-NEXT: [[CONST3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[CONST4:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY2]](s1), [[CONST3]], [[CONST4]]
+ %0:_(s1) = COPY $sgpr4_sgpr5
+ %1:_(s1) = COPY $sgpr6_sgpr7
+ %2:_(s32) = G_ZEXT %0:_(s1)
+ %3:_(s32) = G_ZEXT %1:_(s1)
+...
+
+---
+name: wave32_copy2_sgpr_32_to_s1
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; WAVE32-LABEL: name: wave32_copy2_sgpr_32_to_s1
+ ; WAVE32: liveins: $sgpr0, $sgpr1
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY1:%[0-9]+]]:vcc(s1) = COPY $sgpr0
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:vcc(s1) = COPY $sgpr1
+ ; WAVE32-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; WAVE32-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; WAVE32-NEXT: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY1]](s1), [[CONST1]], [[CONST2]]
+ ; WAVE32-NEXT: [[CONST3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; WAVE32-NEXT: [[CONST4:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; WAVE32-NEXT: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY2]](s1), [[CONST3]], [[CONST4]]
+ %0:_(s1) = COPY $sgpr0
+ %1:_(s1) = COPY $sgpr1
+ %2:_(s32) = G_ZEXT %0:_(s1)
+ %3:_(s32) = G_ZEXT %1:_(s1)
+...
+
+---
+name: copy_sgpr_64_to_s1_vgpr
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr4_sgpr5
+ ; CHECK-LABEL: name: copy_sgpr_64_to_s1_vgpr
+ ; CHECK: liveins: $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s1) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vgpr(s32) = G_ZEXT [[COPY]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_sgpr_64_to_s1_vgpr
+ ; WAVE32: liveins: $sgpr4_sgpr5
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s1) = COPY $sgpr4_sgpr5
+ ; WAVE32-NEXT: [[ZEXT:%[0-9]+]]:vgpr(s32) = G_ZEXT [[COPY]](s1)
+ %0:vgpr(s1) = COPY $sgpr4_sgpr5
+ %1:_(s32) = G_ZEXT %0:vgpr(s1)
+...
+
+---
+name: copy_sgpr_32_to_s1_vgpr
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0
+ ; CHECK-LABEL: name: copy_sgpr_32_to_s1_vgpr
+ ; CHECK: liveins: $sgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s1) = COPY $sgpr0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vgpr(s32) = G_ZEXT [[COPY]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_sgpr_32_to_s1_vgpr
+ ; WAVE32: liveins: $sgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s1) = COPY $sgpr0
+ ; WAVE32-NEXT: [[ZEXT:%[0-9]+]]:vgpr(s32) = G_ZEXT [[COPY]](s1)
+ %0:vgpr(s1) = COPY $sgpr0
+ %1:_(s32) = G_ZEXT %0:vgpr(s1)
+...
+
+---
+name: wave64_copy_sgpr_64_to_s1_vcc
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr4_sgpr5
+ ; CHECK-LABEL: name: wave64_copy_sgpr_64_to_s1_vcc
+ ; CHECK: liveins: $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vcc(s1) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY]](s1), [[CONST1]], [[CONST2]]
+ %0:vcc(s1) = COPY $sgpr4_sgpr5
+ %1:_(s32) = G_ZEXT %0:vcc(s1)
+...
+
+---
+name: wave32_copy_sgpr_32_to_s1_vcc
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0
+ ; WAVE32-LABEL: name: wave32_copy_sgpr_32_to_s1_vcc
+ ; WAVE32: liveins: $sgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vcc(s1) = COPY $sgpr0
+ ; WAVE32-NEXT: [[CONST1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; WAVE32-NEXT: [[CONST2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; WAVE32-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[COPY]](s1), [[CONST1]], [[CONST2]]
+ %0:vcc(s1) = COPY $sgpr0
+ %1:_(s32) = G_ZEXT %0:vcc(s1)
+...
+
+---
+name: copy_virt_reg_to_s1
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: copy_virt_reg_to_s1
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s1) = COPY [[TRUNC]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_virt_reg_to_s1
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:vgpr(s1) = COPY [[TRUNC]](s1)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s1) = G_TRUNC %0
+ %2:_(s1) = COPY %1
+...
+
+---
+name: copy_virt_reg_to_s1_vgpr
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: copy_virt_reg_to_s1_vgpr
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s1) = COPY [[TRUNC]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s1) = COPY [[COPY2]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_virt_reg_to_s1_vgpr
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:vgpr(s1) = COPY [[TRUNC]](s1)
+ ; WAVE32-NEXT: [[COPY3:%[0-9]+]]:vgpr(s1) = COPY [[COPY2]](s1)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s1) = G_TRUNC %0
+ %2:vgpr(s1) = COPY %1
+ %3:_(s1) = COPY %2
+...
+
+
+---
+name: copy_virt_reg_to_s1_vcc
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: copy_virt_reg_to_s1_vcc
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[COPY2]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_virt_reg_to_s1_vcc
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
+ ; WAVE32-NEXT: [[COPY3:%[0-9]+]]:vcc(s1) = COPY [[COPY2]](s1)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s1) = G_TRUNC %0
+ %2:vcc(s1) = COPY %1
+ %3:_(s1) = COPY %2
+...
+
+---
+name: copy_s1_to_sgpr_64
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: copy_s1_to_sgpr_64
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[TRUNC]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_s1_to_sgpr_64
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; WAVE32-NEXT: $sgpr4_sgpr5 = COPY [[TRUNC]](s1)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s1) = G_TRUNC %0
+ $sgpr4_sgpr5 = COPY %1
+...
+
+---
+name: copy_s1_to_sgpr_32
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: copy_s1_to_sgpr_32
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: $sgpr0 = COPY [[TRUNC]](s1)
+ ;
+ ; WAVE32-LABEL: name: copy_s1_to_sgpr_32
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s1) = G_TRUNC [[COPY]](s32)
+ ; WAVE32-NEXT: $sgpr0 = COPY [[TRUNC]](s1)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s1) = G_TRUNC %0
+ $sgpr0 = COPY %1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
index 1315d57..4b1484e 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -5,22 +5,11 @@ target triple = "amdgcn-amd-amdhsa"
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
-; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
-; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
-; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
-; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
-; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
-
-; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base
-
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x0{{$}}
+; GFX9-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x0{{$}}
+; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], [[APERTURE]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
-
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9-DAG: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
-; GFX9-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0
-
-; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
+; HSA-DAG: ds_write_b32 [[PTR]], [[K]]
; HSA: .amdhsa_user_sgpr_private_segment_buffer 1
; HSA: .amdhsa_user_sgpr_dispatch_ptr 0
@@ -39,22 +28,8 @@ define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr
; Test handling inside a non-kernel
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast_func:
-; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x10{{$}}
-; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
-; CI-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
-; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
-; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0
-
-; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base
-
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-
-; GFX9-DAG: v_mov_b32_e32 v[[VREG_HIBASE:[0-9]+]], s[[HIBASE]]
-; GFX9-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
-; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0, vcc
-; GFX9-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, v[[VREG_HIBASE]], vcc
-
-; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
+; HSA-DAG: ds_write_b32 v0, [[K]]
define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {
%stof = addrspacecast ptr addrspace(3) %ptr to ptr
store volatile i32 7, ptr %stof
@@ -63,23 +38,16 @@ define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {
; HSA-LABEL: {{^}}use_private_to_flat_addrspacecast:
-; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
-; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
-
-; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
-; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
-; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
-
-; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
-; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_private_base
-
-; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
-; GFX9: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0
-
-; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x0{{$}}
+; GFX9-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x0{{$}}
+; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], [[APERTURE]]
+; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
+; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
+; SI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s9
+; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
+; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
+; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
+; HSA: buffer_store_dword [[K]], [[PTR]], s[[[BASELO]]:[[RSRCHI]]], 0 offen
; HSA: .amdhsa_user_sgpr_private_segment_buffer 1
; HSA: .amdhsa_user_sgpr_dispatch_ptr 0
@@ -97,10 +65,12 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %p
; HSA-LABEL: {{^}}use_global_to_flat_addrspacecast:
; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
-; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
-; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
+; CI-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
+; CI-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; HSA: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]
+; CI: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]
+; GFX9-DAG: v_mov_b32_e32 [[ADDR:v[0-9]+]], 0
+; GFX9: global_store_dword [[ADDR]], [[K]], s[[[PTRLO]]:[[PTRHI]]]
; HSA: .amdhsa_user_sgpr_queue_ptr 0
define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #0 {
@@ -112,9 +82,7 @@ define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %pt
; no-op
; HSA-LABEL: {{^}}use_constant_to_flat_addrspacecast:
; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
-; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
-; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
-; HSA: flat_load_dword v{{[0-9]+}}, v[[[VPTRLO]]:[[VPTRHI]]]
+; HSA-DAG: s_load_dword s0, s[[[PTRLO]]:[[PTRHI]]], 0x0
define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #0 {
%stof = addrspacecast ptr addrspace(4) %ptr to ptr
%ld = load volatile i32, ptr %stof
@@ -215,14 +183,9 @@ define amdgpu_kernel void @use_flat_to_constant_addrspacecast(ptr %ptr) #0 {
}
; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:
-; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
-; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
-
-; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_shared_base
-
; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
-; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
+; HSA: ds_write_b32 v[[LO]], v[[K]]
define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(3) null to ptr
store volatile i32 7, ptr %cast
@@ -240,10 +203,9 @@ define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
}
; HSA-LABEL: {{^}}cast_neg1_group_to_flat_addrspacecast:
-; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
-; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
-; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
+; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], -1
+; HSA: ds_write_b32 v[[LO]], v[[K]]
define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr
store volatile i32 7, ptr %cast
@@ -262,14 +224,13 @@ define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {
; FIXME: Shouldn't need to enable queue ptr
; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
-; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
-; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
-
-; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_private_base
-
-; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
+; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
+; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
+; CI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
+; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s5
+; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
-; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
+; HSA: buffer_store_dword v[[K]], off, s[[[BASELO]]:[[RSRCHI]]], 0
define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(5) null to ptr
store volatile i32 7, ptr %cast
@@ -286,13 +247,16 @@ define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {
ret void
}
-
; HSA-LABEL: {{^}}cast_neg1_private_to_flat_addrspacecast:
-; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
+; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
+; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
+; CI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
+; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s5
+; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
+; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
-; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
-; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
+; HSA: buffer_store_dword v[[K]], [[PTR]], s[[[BASELO]]:[[RSRCHI]]], 0 offen
; CI: .amdhsa_user_sgpr_queue_ptr 1
; GFX9: .amdhsa_user_sgpr_queue_ptr 0
@@ -342,16 +306,18 @@ end:
; Check for prologue initializing special SGPRs pointing to scratch.
; HSA-LABEL: {{^}}store_flat_scratch:
-; CI-DAG: s_mov_b32 flat_scratch_lo, s9
; CI-DAG: s_add_i32 [[ADD:s[0-9]+]], s8, s11
; CI-DAG: s_lshr_b32 flat_scratch_hi, [[ADD]], 8
-
-; GFX9: s_add_u32 flat_scratch_lo, s6, s9
-; GFX9: s_addc_u32 flat_scratch_hi, s7, 0
-
-; HSA: {{flat|global}}_store_dword
-; HSA: s_barrier
-; HSA: {{flat|global}}_load_dword
+; HSA: buffer_store_dword
+; HSA: s_barrier
+; HSA: buffer_load_dword [[K:v[0-9]+]], v{{[0-9]+}}, s[0:3], 0 offen glc
+; HSA-DAG: s_load_dwordx2
+; CI-DAG: s_mov_b32 flat_scratch_lo, s9
+; CI-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s4
+; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], s5
+; GFX9-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], 0
+; CI: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
+; GFX9: global_store_dword [[PTR]], [[K]]
define amdgpu_kernel void @store_flat_scratch(ptr addrspace(1) noalias %out, i32) #0 {
%alloca = alloca i32, i32 9, align 4, addrspace(5)
%x = call i32 @llvm.amdgcn.workitem.id.x() #2
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 43cdf85..879bcea 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -425,8 +425,7 @@ define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr
;
; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast
; ATTRIBUTOR_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR12:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr
-; ATTRIBUTOR_HSA-NEXT: store volatile i32 0, ptr [[STOF]], align 4
+; ATTRIBUTOR_HSA-NEXT: store volatile i32 0, ptr addrspace(3) [[PTR]], align 4
; ATTRIBUTOR_HSA-NEXT: ret void
;
%stof = addrspacecast ptr addrspace(3) %ptr to ptr
@@ -443,8 +442,7 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %p
;
; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_private_to_flat_addrspacecast
; ATTRIBUTOR_HSA-SAME: (ptr addrspace(5) [[PTR:%.*]]) #[[ATTR12]] {
-; ATTRIBUTOR_HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(5) [[PTR]] to ptr
-; ATTRIBUTOR_HSA-NEXT: store volatile i32 0, ptr [[STOF]], align 4
+; ATTRIBUTOR_HSA-NEXT: store volatile i32 0, ptr addrspace(5) [[PTR]], align 4
; ATTRIBUTOR_HSA-NEXT: ret void
;
%stof = addrspacecast ptr addrspace(5) %ptr to ptr
@@ -478,11 +476,16 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) #1 {
; No-op addrspacecast should not use queue ptr
define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #1 {
-; HSA-LABEL: define {{[^@]+}}@use_global_to_flat_addrspacecast
-; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(1) [[PTR]] to ptr
-; HSA-NEXT: store volatile i32 0, ptr [[STOF]], align 4
-; HSA-NEXT: ret void
+; AKF_HSA-LABEL: define {{[^@]+}}@use_global_to_flat_addrspacecast
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(1) [[PTR]] to ptr
+; AKF_HSA-NEXT: store volatile i32 0, ptr [[STOF]], align 4
+; AKF_HSA-NEXT: ret void
+;
+; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_global_to_flat_addrspacecast
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; ATTRIBUTOR_HSA-NEXT: store volatile i32 0, ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT: ret void
;
%stof = addrspacecast ptr addrspace(1) %ptr to ptr
store volatile i32 0, ptr %stof
@@ -490,11 +493,16 @@ define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %pt
}
define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #1 {
-; HSA-LABEL: define {{[^@]+}}@use_constant_to_flat_addrspacecast
-; HSA-SAME: (ptr addrspace(4) [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(4) [[PTR]] to ptr
-; HSA-NEXT: [[LD:%.*]] = load volatile i32, ptr [[STOF]], align 4
-; HSA-NEXT: ret void
+; AKF_HSA-LABEL: define {{[^@]+}}@use_constant_to_flat_addrspacecast
+; AKF_HSA-SAME: (ptr addrspace(4) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT: [[STOF:%.*]] = addrspacecast ptr addrspace(4) [[PTR]] to ptr
+; AKF_HSA-NEXT: [[LD:%.*]] = load volatile i32, ptr [[STOF]], align 4
+; AKF_HSA-NEXT: ret void
+;
+; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_constant_to_flat_addrspacecast
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(4) [[PTR:%.*]]) #[[ATTR1]] {
+; ATTRIBUTOR_HSA-NEXT: [[LD:%.*]] = load volatile i32, ptr addrspace(4) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT: ret void
;
%stof = addrspacecast ptr addrspace(4) %ptr to ptr
%ld = load volatile i32, ptr %stof
diff --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
index 8ef2d89e..032ec65 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
@@ -38,15 +38,9 @@ define amdgpu_kernel void @kern_indirect_use_queue_ptr(i32) #1 {
}
; GCN-LABEL: {{^}}use_queue_ptr_addrspacecast:
-; CIVI: s_load_dword [[APERTURE_LOAD:s[0-9]+]], s[4:5], 0x0
-; CIVI: v_mov_b32_e32 v[[LO:[0-9]+]], 16
-; CIVI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE_LOAD]]
+; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0
+; GCN-DAG: ds_write_b32 v[[LO]], v[[LO]] offset:16
-; GFX9: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_shared_base
-; GFX9-DAG: v_mov_b32_e32 v[[VGPR_HI:[0-9]+]], s[[HI]]
-; GFX9: {{flat|global}}_store_dword v{{\[[0-9]+}}:[[VGPR_HI]]]
-
-; CIVI: {{flat|global}}_store_dword v[[[LO]]:[[HI]]]
define hidden void @use_queue_ptr_addrspacecast() #1 {
%asc = addrspacecast ptr addrspace(3) inttoptr (i32 16 to ptr addrspace(3)) to ptr
store volatile i32 0, ptr %asc
diff --git a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir
index 2cd7b8a..6ec2961 100644
--- a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir
+++ b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-co-u32.mir
@@ -23,10 +23,35 @@ body: |
bb.0:
; MUBUFW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0
; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 $sgpr32, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 12, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__inline_imm__fi_offset0_live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 4, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0_live_vcc
+ ; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0_live_vcc
; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 $sgpr32, implicit $exec
; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
@@ -49,35 +74,87 @@ body: |
bb.0:
; GFX7-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
; GFX7: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 16, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX8: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 16, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX900: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX90A: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX10: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; FLATSCRW64: $sgpr4 = S_ADD_I32 $sgpr32, 16, implicit-def $scc
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 12, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; GFX7-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
+ ; GFX7: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX7-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 16, killed $vgpr1, 0, implicit $exec
; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX8-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX8-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
; GFX8: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX8-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 16, killed $vgpr1, 0, implicit $exec
; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX900-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX900-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
; GFX900: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX90A-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
; GFX90A: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX10-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; GFX10-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
; GFX10: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 16, killed $vgpr1, implicit $exec
; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm___fi_offset_inline_imm_live_vcc
; FLATSCRW64: $sgpr4 = S_ADD_I32 $sgpr32, 16, implicit-def $scc
; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
@@ -100,10 +177,35 @@ body: |
bb.0:
; MUBUFW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0
; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 $sgpr32, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 68, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__literal__fi_offset0_live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 4, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0_live_vcc
+ ; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0_live_vcc
; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 $sgpr32, implicit $exec
; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
@@ -126,35 +228,87 @@ body: |
bb.0:
; GFX7-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
; GFX7: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 32, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX8: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 32, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX900: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX90A: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX10: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; FLATSCRW64: $sgpr4 = S_ADD_I32 $sgpr32, 32, implicit-def $scc
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 68, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; GFX7-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
+ ; GFX7: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX7-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 32, killed $vgpr1, 0, implicit $exec
; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX8-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX8-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
; GFX8: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX8-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 32, killed $vgpr1, 0, implicit $exec
; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX900-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX900-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
; GFX900: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX90A-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
; GFX90A: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; GFX10-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; GFX10-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
; GFX10: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 32, killed $vgpr1, implicit $exec
; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
;
- ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__literal__fi_offset0__offset_inlineimm_live_vcc
; FLATSCRW64: $sgpr4 = S_ADD_I32 $sgpr32, 32, implicit-def $scc
; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 68, killed $vgpr1, implicit-def $vcc, implicit $exec
@@ -180,17 +334,17 @@ body: |
; MUBUFW64: liveins: $vgpr1
; MUBUFW64-NEXT: {{ $}}
; MUBUFW64-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
;
; FLATSCRW64-LABEL: name: v_add_co_u32_e32__vgpr__fi_offset0
; FLATSCRW64: liveins: $vgpr1
; FLATSCRW64-NEXT: {{ $}}
; FLATSCRW64-NEXT: $vgpr2 = V_MOV_B32_e32 $sgpr32, implicit $exec
- ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
- renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, %stack.0, implicit-def $vcc, implicit $exec
- SI_RETURN implicit $vgpr0, implicit $vcc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
...
@@ -210,16 +364,16 @@ body: |
; MUBUFW64: liveins: $vgpr1
; MUBUFW64-NEXT: {{ $}}
; MUBUFW64-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
;
; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset0__vgpr
; FLATSCRW64: liveins: $vgpr1
; FLATSCRW64-NEXT: {{ $}}
- ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr32, $vgpr1, implicit-def $vcc, implicit $exec
- ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
- renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.0, $vgpr1, implicit-def $vcc, implicit $exec
- SI_RETURN implicit $vgpr0, implicit $vcc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr32, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.0, $vgpr1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
...
@@ -240,53 +394,53 @@ body: |
; GFX7: liveins: $vgpr1
; GFX7-NEXT: {{ $}}
; GFX7-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX7-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX7-NEXT: $vgpr2, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr2, 0, implicit $exec
- ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr2, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr2, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
;
; GFX8-LABEL: name: v_add_co_u32_e32__vgpr__fi_literal_offset
; GFX8: liveins: $vgpr1
; GFX8-NEXT: {{ $}}
; GFX8-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX8-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX8-NEXT: $vgpr2, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr2, 0, implicit $exec
- ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr2, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr2, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
;
; GFX900-LABEL: name: v_add_co_u32_e32__vgpr__fi_literal_offset
; GFX900: liveins: $vgpr1
; GFX900-NEXT: {{ $}}
; GFX900-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX900-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
;
; GFX90A-LABEL: name: v_add_co_u32_e32__vgpr__fi_literal_offset
; GFX90A: liveins: $vgpr1
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX90A-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
;
; GFX10-LABEL: name: v_add_co_u32_e32__vgpr__fi_literal_offset
; GFX10: liveins: $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX10-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
;
; FLATSCRW64-LABEL: name: v_add_co_u32_e32__vgpr__fi_literal_offset
; FLATSCRW64: liveins: $vgpr1
; FLATSCRW64-NEXT: {{ $}}
; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
; FLATSCRW64-NEXT: $vgpr2 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
- ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def $vcc, implicit $exec
- ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
- renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, %stack.1, implicit-def $vcc, implicit $exec
- SI_RETURN implicit $vgpr0, implicit $vcc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
...
@@ -307,52 +461,52 @@ body: |
; GFX7: liveins: $vgpr1
; GFX7-NEXT: {{ $}}
; GFX7-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX7-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX7-NEXT: $vgpr2, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr2, 0, implicit $exec
- ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr2, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr2, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
;
; GFX8-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr
; GFX8: liveins: $vgpr1
; GFX8-NEXT: {{ $}}
; GFX8-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX8-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX8-NEXT: $vgpr2, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr2, 0, implicit $exec
- ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr2, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr2, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
;
; GFX900-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr
; GFX900: liveins: $vgpr1
; GFX900-NEXT: {{ $}}
; GFX900-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX900-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
;
; GFX90A-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr
; GFX90A: liveins: $vgpr1
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX90A-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
;
; GFX10-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr
; GFX10: liveins: $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: $vgpr2 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX10-NEXT: $vgpr2 = V_ADD_U32_e32 128, killed $vgpr2, implicit $exec
- ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $vgpr2, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
;
; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr
; FLATSCRW64: liveins: $vgpr1
; FLATSCRW64-NEXT: {{ $}}
; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
- ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $sgpr4, $vgpr1, implicit-def $vcc, implicit $exec
- ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
- renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.1, $vgpr1, implicit-def $vcc, implicit $exec
- SI_RETURN implicit $vgpr0, implicit $vcc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 killed $sgpr4, $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.1, $vgpr1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
...
@@ -373,53 +527,53 @@ body: |
; GFX7: liveins: $sgpr8
; GFX7-NEXT: {{ $}}
; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX7-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX7-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
- ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
;
; GFX8-LABEL: name: v_add_co_u32_e32__sgpr__fi_literal_offset
; GFX8: liveins: $sgpr8
; GFX8-NEXT: {{ $}}
; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
- ; GFX8-NEXT: $sgpr4 = S_MOV_B32 128
- ; GFX8-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
- ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
;
; GFX900-LABEL: name: v_add_co_u32_e32__sgpr__fi_literal_offset
; GFX900: liveins: $sgpr8
; GFX900-NEXT: {{ $}}
; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
- ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
;
; GFX90A-LABEL: name: v_add_co_u32_e32__sgpr__fi_literal_offset
; GFX90A: liveins: $sgpr8
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
- ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
;
; GFX10-LABEL: name: v_add_co_u32_e32__sgpr__fi_literal_offset
; GFX10: liveins: $sgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
- ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
;
; FLATSCRW64-LABEL: name: v_add_co_u32_e32__sgpr__fi_literal_offset
; FLATSCRW64: liveins: $sgpr8
; FLATSCRW64-NEXT: {{ $}}
; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
- ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def $vcc, implicit $exec
- ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
- renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, %stack.1, implicit-def $vcc, implicit $exec
- SI_RETURN implicit $vgpr0, implicit $vcc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
...
@@ -448,6 +602,54 @@ body: |
...
---
+name: v_add_co_u32_e64__inline_imm__fi_offset0__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 4, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset0__clamp
+ ; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset0__clamp
+ ; FLATSCRW64: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, $sgpr32, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, %stack.0, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__inline_imm__fi_offset0__live_vcc_clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 4, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset0__live_vcc_clamp
+ ; MUBUFW64: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset0__live_vcc_clamp
+ ; FLATSCRW64: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 12, $sgpr32, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 12, %stack.0, 1, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vcc
+
+...
+
+---
name: v_add_co_u32_e64__fi_literal_offset__sgpr
tracksRegLiveness: true
stack:
@@ -527,3 +729,1186 @@ body: |
SI_RETURN implicit $vgpr0, implicit $vcc
...
+
+---
+name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $sgpr8
+ ; GFX7-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX7: liveins: $sgpr8
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $sgpr4 = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX8: liveins: $sgpr8
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $sgpr4 = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX900: liveins: $sgpr8
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX90A: liveins: $sgpr8
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX10: liveins: $sgpr8
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX940-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX940: liveins: $sgpr8
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX940-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; GFX940-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 1, implicit $exec
+ ; GFX940-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX11-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX11: liveins: $sgpr8
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX11-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $sgpr8, 1, implicit $exec
+ ; GFX11-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX12-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr_clamp
+ ; GFX12: liveins: $sgpr8
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX12-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $sgpr8, 1, implicit $exec
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 %stack.1, $sgpr8, 1, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vcc
+
+...
+
+---
+name: v_add_co_u32_e64__fi_literal_offset__vgpr
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; GFX7: liveins: $vgpr8
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; GFX8: liveins: $vgpr8
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; GFX900: liveins: $vgpr8
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; GFX90A: liveins: $vgpr8
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; GFX10: liveins: $vgpr8
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $vgpr8, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable dead $vcc = V_ADD_CO_U32_e64 %stack.1, $vgpr8, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; GFX7: liveins: $vgpr8
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; GFX8: liveins: $vgpr8
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; GFX900: liveins: $vgpr8
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; GFX90A: liveins: $vgpr8
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; GFX10: liveins: $vgpr8
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__clamp
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $vgpr8, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable dead $vcc = V_ADD_CO_U32_e64 %stack.1, $vgpr8, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; GFX7: liveins: $vgpr8
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $sgpr4 = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; GFX8: liveins: $vgpr8
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $sgpr4 = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $sgpr4_sgpr5 = V_ADD_CO_U32_e64 killed $sgpr4, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; GFX900: liveins: $vgpr8
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; GFX90A: liveins: $vgpr8
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; GFX10: liveins: $vgpr8
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $vgpr8, 0, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__fi_literal_offset__vgpr__live_vcc
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $vgpr8, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 %stack.1, $vgpr8, 0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vcc
+
+...
+
+---
+name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel
+ ; MUBUFW64: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 12, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel__live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel__live_vcc
+ ; MUBUFW64: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset0__kernel__live_vcc
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ renamable $vgpr0 = V_ADD_CO_U32_e32 12, %stack.0, implicit-def $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vcc
+
+...
+
+---
+name: v_add_co_u32_e32__inline_imm__fi_offset_literal__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 40, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset_literal__kernel
+ ; MUBUFW64: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__inline_imm__fi_offset_literal__kernel
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 12, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 12, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 40, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; GFX7-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX7: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX7-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX7-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX8-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX8-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX900: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX900-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX900-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX90A: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $vgpr1, 1, implicit $exec
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, 72, 1, implicit $exec
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX940-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX940: $sgpr4 = S_MOV_B32 72
+ ; GFX940-NEXT: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, killed $sgpr4, 1, implicit $exec
+ ; GFX940-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX11-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX11: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, 72, 1, implicit $exec
+ ; GFX11-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: v_add_co_u32_e64__inline_imm__fi_offset_literal__kernel__clamp
+ ; GFX12: renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, 72, 1, implicit $exec
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, dead $vcc = V_ADD_CO_U32_e64 12, %stack.1, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr252_vgpr253, $vgpr254, $vgpr255, $sgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX7: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX7-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX7-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX8: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX8-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX8-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX900: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX900-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX90A: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX90A-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX10: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX10-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX940-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX940: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr1, $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX940-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX940-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; GFX940-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr1, $sgpr8, 0, implicit $exec
+ ; GFX940-NEXT: $vgpr1 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX940-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX940-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX11-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX11: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX11-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $sgpr8, 0, implicit $exec
+ ; GFX11-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX11-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: v_add_co_u32_e64__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX12: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; GFX12-NEXT: renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr4, $sgpr8, 0, implicit $exec
+ ; GFX12-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable $vcc = V_ADD_CO_U32_e64 %stack.1, $sgpr8, 0, implicit $exec
+
+ S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit 
$vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr252_vgpr253, $vgpr254, $vgpr255, $sgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX7: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX7-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX8: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX8-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX900: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX900-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX90A: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX90A-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; GFX10: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX10-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_literal_offset__sgpr__scavenge_spill_required
+ ; FLATSCRW64: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr1, $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.2, addrspace 5)
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: $vgpr1 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.2, addrspace 5)
+ ; FLATSCRW64-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr8, %stack.1, implicit-def dead $vcc, implicit $exec
+
+ S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit 
$vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr252_vgpr253, $vgpr254, $vgpr255, $sgpr8
+
+ ; GFX7-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; GFX7: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX7-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX7-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX7-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX7-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX7-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX7-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX7-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX8-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; GFX8: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX8-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX8-NEXT: $vcc_lo = S_MOV_B32 128
+ ; GFX8-NEXT: $vgpr1, dead $vcc = V_ADD_CO_U32_e64 killed $vcc_lo, killed $vgpr1, 0, implicit $exec
+ ; GFX8-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX8-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX8-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX8-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX900-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; GFX900: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX900-NEXT: {{ $}}
+ ; GFX900-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX900-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX900-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX900-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX900-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX900-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX90A-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; GFX90A: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX90A-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX90A-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX90A-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX90A-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX10-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; GFX10: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GFX10-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; GFX10-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; GFX10-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 132, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GFX10-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, 
implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; GFX10-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_literal_offset__vgpr__scavenge_spill_required
+ ; FLATSCRW64: liveins: $sgpr8, $vgpr254, $vgpr255, $vgpr252_vgpr253, $vgpr248_vgpr249_vgpr250_vgpr251, $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr1, $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.2, addrspace 5)
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr4, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: $vgpr1 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 132, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.2, addrspace 5)
+ ; FLATSCRW64-NEXT: S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit 
$vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit $vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr8, %stack.1, implicit-def dead $vcc, implicit $exec
+
+ S_NOP 0, implicit $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, implicit $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, implicit $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, implicit $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, implicit $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79, implicit $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95, implicit $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111, implicit $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127, implicit $vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143, implicit $vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159, implicit $vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175, implicit $vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184_vgpr185_vgpr186_vgpr187_vgpr188_vgpr189_vgpr190_vgpr191, implicit $vgpr192_vgpr193_vgpr194_vgpr195_vgpr196_vgpr197_vgpr198_vgpr199_vgpr200_vgpr201_vgpr202_vgpr203_vgpr204_vgpr205_vgpr206_vgpr207, implicit $vgpr208_vgpr209_vgpr210_vgpr211_vgpr212_vgpr213_vgpr214_vgpr215_vgpr216_vgpr217_vgpr218_vgpr219_vgpr220_vgpr221_vgpr222_vgpr223, implicit $vgpr224_vgpr225_vgpr226_vgpr227_vgpr228_vgpr229_vgpr230_vgpr231_vgpr232_vgpr233_vgpr234_vgpr235_vgpr236_vgpr237_vgpr238_vgpr239, implicit 
$vgpr240_vgpr241_vgpr242_vgpr243_vgpr244_vgpr245_vgpr246_vgpr247, implicit $vgpr248_vgpr249_vgpr250_vgpr251, implicit $vgpr252_vgpr253, implicit $vgpr254, implicit $vgpr255
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUFW64: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr1, killed $vgpr2, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0 = V_ADD_CO_U32_e32 renamable $vgpr1, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+name: v_add_co_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUFW64: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 renamable $vgpr1, %stack.0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+name: v_add_co_u32_e64__kernel__other_vgpr_live_after__fi_offset0
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__kernel__other_vgpr_live_after__fi_offset0
+ ; MUBUFW64: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 0, $vgpr1, 0, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__kernel__other_vgpr_live_after__fi_offset0
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 0, $vgpr1, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0, renamable $sgpr8_sgpr9 = V_ADD_CO_U32_e64 %stack.0, renamable $vgpr1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+name: v_add_co_u32_e32__identity_vgpr__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, %stack.0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.0, $vgpr0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__identity_vgpr__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable dead $vcc = V_ADD_CO_U32_e64 $vgpr0, %stack.0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__fi_offset0__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable dead $vcc = V_ADD_CO_U32_e64 %stack.0, $vgpr0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_live_vcc
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_live_vcc
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, $vgpr0, implicit-def $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset0__identity_vgpr__kernel_live_vcc
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 0, $vgpr0, implicit-def $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vcc
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.0, $vgpr0, implicit-def $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vcc
+
+...
+
+---
+name: v_add_co_u32_e32__identity_vgpr__fi_offset32__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+
+---
+name: v_add_co_u32_e32__identity_vgpr__fi_offset72__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, killed $vgpr1, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 $vgpr0, %stack.1, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_offset72__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 72, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 72, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.1, $vgpr0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e32__fi_offset32__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 32, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 32, $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_CO_U32_e32 %stack.1, $vgpr0, implicit-def dead $vcc, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_co_u32_e64__identity_vgpr__fi_offset32__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUFW64-LABEL: name: v_add_co_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; MUBUFW64: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: {{ $}}
+ ; MUBUFW64-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; MUBUFW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_co_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0, dead renamable $vcc = V_ADD_CO_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0, renamable dead $vcc = V_ADD_CO_U32_e64 $vgpr0, %stack.1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-u32.mir b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-u32.mir
index 2d62d42..af6823c 100644
--- a/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-u32.mir
+++ b/llvm/test/CodeGen/AMDGPU/eliminate-frame-index-v-add-u32.mir
@@ -467,3 +467,1280 @@ body: |
SI_RETURN implicit $vgpr0
...
+
+---
+name: v_add_u32_e64__vgpr__fi_literal_offset
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset
+ ; MUBUF: liveins: $vgpr8
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUF-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $vgpr1, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset
+ ; MUBUFW32: liveins: $vgpr8
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $vgpr1 = V_LSHRREV_B32_e64 5, $sgpr32, implicit $exec
+ ; MUBUFW32-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $vgpr1, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $sgpr4, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset
+ ; FLATSCRW32: liveins: $vgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $sgpr4, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, %stack.1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__vgpr__fi_literal_offset__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset__clamp
+ ; MUBUF: liveins: $vgpr8
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUF-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $vgpr1, 1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset__clamp
+ ; MUBUFW32: liveins: $vgpr8
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $vgpr1 = V_LSHRREV_B32_e64 5, $sgpr32, implicit $exec
+ ; MUBUFW32-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $vgpr1, 1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset__clamp
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $sgpr4, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__vgpr__fi_literal_offset__clamp
+ ; FLATSCRW32: liveins: $vgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, killed $sgpr4, 1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 $vgpr8, %stack.1, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__fi_literal_offset__vgpr__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp
+ ; MUBUF: liveins: $vgpr8
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $vgpr1 = V_LSHRREV_B32_e64 6, $sgpr32, implicit $exec
+ ; MUBUF-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp
+ ; MUBUFW32: liveins: $vgpr8
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $vgpr1 = V_LSHRREV_B32_e64 5, $sgpr32, implicit $exec
+ ; MUBUFW32-NEXT: $vgpr1 = V_ADD_U32_e32 128, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $sgpr4, $vgpr8, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp
+ ; FLATSCRW32: liveins: $vgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $sgpr4 = S_ADD_I32 $sgpr32, 128, implicit-def $scc
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $sgpr4, $vgpr8, 1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 %stack.1, $vgpr8, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__fi_literal_offset__vgpr__clamp__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 128, alignment: 16 }
+ - { id: 1, size: 4, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp__kernel
+ ; MUBUF: liveins: $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 128, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $vgpr1, $vgpr8, 1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp__kernel
+ ; MUBUFW32: liveins: $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 128, $vgpr8, 1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp__kernel
+ ; FLATSCRW64: liveins: $vgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $sgpr4 = S_MOV_B32 128
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 killed $sgpr4, $vgpr8, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__fi_literal_offset__vgpr__clamp__kernel
+ ; FLATSCRW32: liveins: $vgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 128, $vgpr8, 1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 %stack.1, $vgpr8, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e32__inline_imm__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e32__inline_imm__fi_offset0__kernel
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__inline_imm__fi_offset0__kernel
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__inline_imm__fi_offset0__kernel
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__inline_imm__fi_offset0__kernel
+ ; FLATSCRW32: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 12, %stack.0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# VOP3 add of an inline immediate with a frame index whose scratch offset is 0:
+# the checks show %stack.0 folded to the immediate operand 0 on every run line;
+# the MUBUF runs additionally emit the scratch-rsrc (SRD) setup adds.
+name: v_add_u32_e64__inline_imm__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e64__inline_imm__fi_offset0__kernel
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 0, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__inline_imm__fi_offset0__kernel
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 0, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__inline_imm__fi_offset0__kernel
+ ; FLATSCRW64: renamable $vgpr0 = V_ADD_U32_e64 12, 0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__inline_imm__fi_offset0__kernel
+ ; FLATSCRW32: renamable $vgpr0 = V_ADD_U32_e64 12, 0, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 12, %stack.0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+
+
+---
+# VOP2 (e32) add of an inline immediate with a frame index at a nonzero scratch
+# offset: %stack.1 sits after the 32-byte %stack.0, and the checks show the
+# offset materialized as 32 into a scratch VGPR ($vgpr1) rather than folded,
+# since the e32 encoding already has 12 in its immediate slot.
+name: v_add_u32_e32__inline_imm__fi_literal__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 80, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e32__inline_imm__fi_literal__kernel
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__inline_imm__fi_literal__kernel
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__inline_imm__fi_literal__kernel
+ ; FLATSCRW64: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__inline_imm__fi_literal__kernel
+ ; FLATSCRW32: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 12, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 12, %stack.1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# Same nonzero-offset frame index as the e32 variant above, but with the VOP3
+# (e64) encoding: the checks show %stack.1 folded directly to the literal 32 as
+# src1, with no scratch V_MOV needed.
+name: v_add_u32_e64__inline_imm__fi_literal__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 80, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel
+ ; FLATSCRW64: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel
+ ; FLATSCRW32: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 12, %stack.1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# Operand-order variant: the frame index is src0 and the inline immediate is
+# src1. The checks show %stack.1 folded in place to the literal 32 as src0
+# (no operand swap, no scratch register).
+name: v_add_u32_e64__fi_literal__inline_imm__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 80, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e64__fi_literal__inline_imm__kernel
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 32, 12, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__fi_literal__inline_imm__kernel
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 32, 12, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__fi_literal__inline_imm__kernel
+ ; FLATSCRW64: renamable $vgpr0 = V_ADD_U32_e64 32, 12, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__fi_literal__inline_imm__kernel
+ ; FLATSCRW32: renamable $vgpr0 = V_ADD_U32_e64 32, 12, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 %stack.1, 12, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# As the e64 FI-literal test, but with the clamp modifier set (trailing operand
+# 1): the checks confirm the frame index still folds to the literal 32 and the
+# clamp bit is preserved through elimination.
+name: v_add_u32_e64__inline_imm__fi_literal__kernel__clamp
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 80, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ ; MUBUF-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel__clamp
+ ; MUBUF: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel__clamp
+ ; MUBUFW32: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel__clamp
+ ; FLATSCRW64: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__inline_imm__fi_literal__kernel__clamp
+ ; FLATSCRW32: renamable $vgpr0 = V_ADD_U32_e64 12, 32, 1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 12, %stack.1, 1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# Regression test for kill-flag handling during frame-index elimination:
+# $vgpr0 has a non-killed use at the first FI instruction (V_ADD) and a killed
+# use at the second (V_SUB). The checks verify both %stack.0 references fold to
+# the immediate 0 and that the original kill flags are left intact.
+name: killed_reg_regression
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 80, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: killed_reg_regression
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr1 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr2 = V_MOV_B32_e32 15, implicit $exec
+ ; MUBUF-NEXT: SCRATCH_STORE_DWORD killed renamable $vgpr2, killed renamable $vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store (s32), addrspace 5)
+ ; MUBUF-NEXT: renamable $vgpr0 = V_SUB_U32_e32 0, killed $vgpr0, implicit $exec
+ ; MUBUF-NEXT: dead renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 124, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32), addrspace 5)
+ ; MUBUF-NEXT: S_ENDPGM 0
+ ;
+ ; MUBUFW32-LABEL: name: killed_reg_regression
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr1 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr2 = V_MOV_B32_e32 15, implicit $exec
+ ; MUBUFW32-NEXT: SCRATCH_STORE_DWORD killed renamable $vgpr2, killed renamable $vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store (s32), addrspace 5)
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_SUB_U32_e32 0, killed $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: dead renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 124, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32), addrspace 5)
+ ; MUBUFW32-NEXT: S_ENDPGM 0
+ ;
+ ; FLATSCRW64-LABEL: name: killed_reg_regression
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr1 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr2 = V_MOV_B32_e32 15, implicit $exec
+ ; FLATSCRW64-NEXT: SCRATCH_STORE_DWORD killed renamable $vgpr2, killed renamable $vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store (s32), addrspace 5)
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_SUB_U32_e32 0, killed $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: dead renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 124, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32), addrspace 5)
+ ; FLATSCRW64-NEXT: S_ENDPGM 0
+ ;
+ ; FLATSCRW32-LABEL: name: killed_reg_regression
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr1 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr2 = V_MOV_B32_e32 15, implicit $exec
+ ; FLATSCRW32-NEXT: SCRATCH_STORE_DWORD killed renamable $vgpr2, killed renamable $vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store (s32), addrspace 5)
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_SUB_U32_e32 0, killed $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: dead renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 124, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32), addrspace 5)
+ ; FLATSCRW32-NEXT: S_ENDPGM 0
+ renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
+ renamable $vgpr1 = V_ADD_U32_e32 %stack.0, $vgpr0, implicit $exec
+ renamable $vgpr2 = V_MOV_B32_e32 15, implicit $exec
+ SCRATCH_STORE_DWORD killed renamable $vgpr2, killed renamable $vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store (s32), addrspace 5)
+ renamable $vgpr0 = V_SUB_U32_e32 %stack.0, killed $vgpr0, implicit $exec
+ dead renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 124, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32), addrspace 5)
+ S_ENDPGM 0
+
+...
+
+---
+# e32 add of a live-through VGPR ($vgpr1, also live after the add) with a
+# frame index at offset 0: the checks show the scratch V_MOV picks $vgpr2,
+# avoiding the still-live $vgpr1.
+name: v_add_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUF-LABEL: name: v_add_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUF: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr1, killed $vgpr2, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUFW32: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr1, killed $vgpr2, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr1, killed $vgpr2, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW32: liveins: $vgpr1
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr1, killed $vgpr2, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0 = V_ADD_U32_e32 renamable $vgpr1, %stack.0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+# Mirror of the previous test with the frame index in the src0 (immediate)
+# slot of the e32 add: the checks show %stack.0 folded directly to 0, so no
+# scratch VGPR is required even though $vgpr1 stays live.
+name: v_add_u32_e32__kernel_other_vgpr_live_after__fi_offset0
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUF-LABEL: name: v_add_u32_e32__kernel_other_vgpr_live_after__fi_offset0
+ ; MUBUF: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__kernel_other_vgpr_live_after__fi_offset0
+ ; MUBUFW32: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__kernel_other_vgpr_live_after__fi_offset0
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__kernel_other_vgpr_live_after__fi_offset0
+ ; FLATSCRW32: liveins: $vgpr1
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0 = V_ADD_U32_e32 %stack.0, renamable $vgpr1, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+# e32 add of a live-through SGPR ($sgpr8) with a frame index at offset 0: the
+# checks show the FI materialized as 0 into $vgpr1 (the e32 vsrc1 slot must be
+# a VGPR), with $sgpr8 kept as src0.
+name: v_add_u32_e32__kernel_fi_offset0__sgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $sgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e32__kernel_fi_offset0__sgpr_live_after
+ ; MUBUF: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__kernel_fi_offset0__sgpr_live_after
+ ; MUBUFW32: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__kernel_fi_offset0__sgpr_live_after
+ ; FLATSCRW64: liveins: $sgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__kernel_fi_offset0__sgpr_live_after
+ ; FLATSCRW32: liveins: $sgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ renamable $vgpr0 = V_ADD_U32_e32 renamable $sgpr8, %stack.0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $sgpr8
+
+...
+
+---
+# e64 counterpart of the live-through-VGPR test: with the VOP3 encoding the
+# checks show %stack.0 folded to the immediate 0 as src1, needing no scratch
+# register at all.
+name: v_add_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr1
+ ; MUBUF-LABEL: name: v_add_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUF: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; MUBUFW32: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW64: liveins: $vgpr1
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__kernel_fi_offset0__other_vgpr_live_after
+ ; FLATSCRW32: liveins: $vgpr1
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr1, 0, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ renamable $vgpr0 = V_ADD_U32_e64 renamable $vgpr1, %stack.0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+
+...
+
+---
+# e32 add of a live-through SGPR with a frame index at a nonzero offset:
+# %stack.1 follows the 72-byte %stack.0, and the checks show the offset 72
+# materialized into $vgpr1 on all run lines.
+name: v_add_u32_e32__kernel_fi_offset72__sgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 32, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $sgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e32__kernel_fi_offset72__sgpr_live_after
+ ; MUBUF: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__kernel_fi_offset72__sgpr_live_after
+ ; MUBUFW32: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__kernel_fi_offset72__sgpr_live_after
+ ; FLATSCRW64: liveins: $sgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__kernel_fi_offset72__sgpr_live_after
+ ; FLATSCRW32: liveins: $sgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $sgpr8, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ renamable $vgpr0 = V_ADD_U32_e32 renamable $sgpr8, %stack.1, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $sgpr8
+
+...
+
+---
+# e64 variant of the offset-72 SGPR test. The checks differ per wavefront
+# size: the wave64 runs (MUBUF, FLATSCRW64) materialize 72 into $vgpr1, while
+# the wave32 runs (MUBUFW32, FLATSCRW32) fold 72 directly as a literal
+# operand — presumably due to operand-legality differences; see the checks.
+name: v_add_u32_e64__kernel_fi_offset72__sgpr_live_after
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 32, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $sgpr8
+ ; MUBUF-LABEL: name: v_add_u32_e64__kernel_fi_offset72__sgpr_live_after
+ ; MUBUF: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $sgpr8, killed $vgpr1, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__kernel_fi_offset72__sgpr_live_after
+ ; MUBUFW32: liveins: $sgpr8, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $sgpr8, 72, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__kernel_fi_offset72__sgpr_live_after
+ ; FLATSCRW64: liveins: $sgpr8
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $sgpr8, killed $vgpr1, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__kernel_fi_offset72__sgpr_live_after
+ ; FLATSCRW32: liveins: $sgpr8
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $sgpr8, 72, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0, implicit $sgpr8
+ renamable $vgpr0 = V_ADD_U32_e64 renamable $sgpr8, %stack.1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0, implicit $sgpr8
+
+...
+
+---
+# e32 add where src0 is the same register as the destination ($vgpr0) and
+# src1 is a frame index at offset 0: the checks show the FI materialized into
+# $vgpr1, leaving the $vgpr0 read/write untouched.
+name: v_add_u32_e32__identity_vgpr__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, %stack.0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+# Mirror of the previous test with operands swapped: the frame index sits in
+# the e32 src0 (immediate) slot, so the checks show %stack.0 folded directly
+# to 0 with no scratch VGPR, even though dst aliases src1 ($vgpr0).
+name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; MUBUF-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 %stack.0, $vgpr0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__identity_vgpr__fi_offset0__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset0__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 0, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, %stack.0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__fi_offset0__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__fi_offset0__identity_vgpr__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 0, $vgpr0, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 %stack.0, $vgpr0, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; MUBUF-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, killed $vgpr0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, killed $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, killed $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__fi_offset0__identity_vgpr__kernel_kill
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 0, killed $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 %stack.0, killed $vgpr0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e32__identity_vgpr__fi_offset32__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr1 = V_MOV_B32_e32 32, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, %stack.1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+
+---
+name: v_add_u32_e32__identity_vgpr__fi_offset72__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__identity_vgpr__fi_offset72__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: $vgpr1 = V_MOV_B32_e32 72, implicit $exec
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, killed $vgpr1, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 $vgpr0, %stack.1, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e32__fi_offset72__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 72, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 72, $vgpr0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 72, $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 72, $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__fi_offset72__identity_vgpr__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 72, $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 %stack.1, $vgpr0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e32__fi_offset32__identity_vgpr__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e32 32, $vgpr0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 32, $vgpr0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e32 32, $vgpr0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e32__fi_offset32__identity_vgpr__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e32 32, $vgpr0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e32 %stack.1, $vgpr0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
+
+---
+name: v_add_u32_e64__identity_vgpr__fi_offset32__kernel
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 32, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo:
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ frameOffsetReg: '$sgpr33'
+ stackPtrOffsetReg: '$sgpr32'
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; MUBUF-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; MUBUF: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: {{ $}}
+ ; MUBUF-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUF-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; MUBUF-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; MUBUFW32-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; MUBUFW32: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: {{ $}}
+ ; MUBUFW32-NEXT: $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+ ; MUBUFW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; MUBUFW32-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW64-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW64: liveins: $vgpr0
+ ; FLATSCRW64-NEXT: {{ $}}
+ ; FLATSCRW64-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; FLATSCRW64-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; FLATSCRW32-LABEL: name: v_add_u32_e64__identity_vgpr__fi_offset32__kernel
+ ; FLATSCRW32: liveins: $vgpr0
+ ; FLATSCRW32-NEXT: {{ $}}
+ ; FLATSCRW32-NEXT: renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, 32, 0, implicit $exec
+ ; FLATSCRW32-NEXT: SI_RETURN implicit $vgpr0
+ renamable $vgpr0 = V_ADD_U32_e64 $vgpr0, %stack.1, 0, implicit $exec
+ SI_RETURN implicit $vgpr0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-call-set-from-other-function.ll b/llvm/test/CodeGen/AMDGPU/indirect-call-set-from-other-function.ll
new file mode 100644
index 0000000..f419d89
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/indirect-call-set-from-other-function.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s
+
+@g_fn = addrspace(1) global ptr null
+
+;.
+; CHECK: @g_fn = addrspace(1) global ptr null
+;.
+define void @set_fn(ptr %fn) {
+; CHECK-LABEL: define {{[^@]+}}@set_fn
+; CHECK-SAME: (ptr [[FN:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store ptr [[FN]], ptr addrspace(1) @g_fn, align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ store ptr %fn, ptr addrspace(1) @g_fn
+ ret void
+}
+
+define void @get_fn(ptr %fn) {
+; CHECK-LABEL: define {{[^@]+}}@get_fn
+; CHECK-SAME: (ptr [[FN:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr addrspace(1) @g_fn, align 8
+; CHECK-NEXT: store ptr [[LOAD]], ptr [[FN]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %load = load ptr, ptr addrspace(1) @g_fn
+ store ptr %load, ptr %fn
+ ret void
+}
+
+define void @foo() {
+; CHECK-LABEL: define {{[^@]+}}@foo
+; CHECK-SAME: () #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[FN:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: store ptr null, ptr addrspace(5) [[FN]], align 8
+; CHECK-NEXT: [[FN_CAST:%.*]] = addrspacecast ptr addrspace(5) [[FN]] to ptr
+; CHECK-NEXT: call void @get_fn(ptr [[FN_CAST]])
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr addrspace(5) [[FN]], align 8
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr [[LOAD]], null
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[LOAD_1:%.*]] = load ptr, ptr addrspace(5) [[FN]], align 8
+; CHECK-NEXT: call void [[LOAD_1]]()
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ %fn = alloca ptr, addrspace(5)
+ store ptr null, ptr addrspace(5) %fn
+ %fn.cast = addrspacecast ptr addrspace(5) %fn to ptr
+ call void @get_fn(ptr %fn.cast)
+ %load = load ptr, ptr addrspace(5) %fn
+ %tobool = icmp ne ptr %load, null
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then:
+ %load.1 = load ptr, ptr addrspace(5) %fn
+ call void %load.1()
+ br label %if.end
+
+if.end:
+ ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 15f23ed..ee7f937 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -43,7 +43,6 @@
; GCN-O0-NEXT: FunctionPass Manager
; GCN-O0-NEXT: Expand Atomic instructions
; GCN-O0-NEXT: Remove unreachable blocks from the CFG
-; GCN-O0-NEXT: Expand vector predication intrinsics
; GCN-O0-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O0-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O0-NEXT: Expand reduction intrinsics
@@ -222,7 +221,6 @@
; GCN-O1-NEXT: Constant Hoisting
; GCN-O1-NEXT: Replace intrinsics with calls to vector library
; GCN-O1-NEXT: Partially inline calls to library functions
-; GCN-O1-NEXT: Expand vector predication intrinsics
; GCN-O1-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O1-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O1-NEXT: Expand reduction intrinsics
@@ -508,7 +506,6 @@
; GCN-O1-OPTS-NEXT: Constant Hoisting
; GCN-O1-OPTS-NEXT: Replace intrinsics with calls to vector library
; GCN-O1-OPTS-NEXT: Partially inline calls to library functions
-; GCN-O1-OPTS-NEXT: Expand vector predication intrinsics
; GCN-O1-OPTS-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O1-OPTS-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O1-OPTS-NEXT: Expand reduction intrinsics
@@ -813,7 +810,6 @@
; GCN-O2-NEXT: Constant Hoisting
; GCN-O2-NEXT: Replace intrinsics with calls to vector library
; GCN-O2-NEXT: Partially inline calls to library functions
-; GCN-O2-NEXT: Expand vector predication intrinsics
; GCN-O2-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O2-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O2-NEXT: Expand reduction intrinsics
@@ -1126,7 +1122,6 @@
; GCN-O3-NEXT: Constant Hoisting
; GCN-O3-NEXT: Replace intrinsics with calls to vector library
; GCN-O3-NEXT: Partially inline calls to library functions
-; GCN-O3-NEXT: Expand vector predication intrinsics
; GCN-O3-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O3-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O3-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
index 074489b..d085b3c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
@@ -523,14 +523,23 @@ define amdgpu_ps void @s_buffer_load_imm_mergex2(<4 x i32> inreg %desc) {
; GFX67-NEXT: exp mrt0 v0, v1, v0, v0 done vm
; GFX67-NEXT: s_endpgm
;
-; GFX8910-LABEL: s_buffer_load_imm_mergex2:
-; GFX8910: ; %bb.0: ; %main_body
-; GFX8910-NEXT: s_buffer_load_dwordx2 s[0:1], s[0:3], 0x4
-; GFX8910-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8910-NEXT: v_mov_b32_e32 v0, s0
-; GFX8910-NEXT: v_mov_b32_e32 v1, s1
-; GFX8910-NEXT: exp mrt0 v0, v1, v0, v0 done vm
-; GFX8910-NEXT: s_endpgm
+; GFX8-LABEL: s_buffer_load_imm_mergex2:
+; GFX8: ; %bb.0: ; %main_body
+; GFX8-NEXT: s_buffer_load_dwordx2 s[0:1], s[0:3], 0x4
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: exp mrt0 v0, v1, v0, v0 done vm
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: s_buffer_load_imm_mergex2:
+; GFX910: ; %bb.0: ; %main_body
+; GFX910-NEXT: s_buffer_load_dwordx2 s[4:5], s[0:3], 0x4
+; GFX910-NEXT: s_waitcnt lgkmcnt(0)
+; GFX910-NEXT: v_mov_b32_e32 v0, s4
+; GFX910-NEXT: v_mov_b32_e32 v1, s5
+; GFX910-NEXT: exp mrt0 v0, v1, v0, v0 done vm
+; GFX910-NEXT: s_endpgm
;
; GFX11-LABEL: s_buffer_load_imm_mergex2:
; GFX11: ; %bb.0: ; %main_body
@@ -570,16 +579,27 @@ define amdgpu_ps void @s_buffer_load_imm_mergex4(<4 x i32> inreg %desc) {
; GFX67-NEXT: exp mrt0 v0, v1, v2, v3 done vm
; GFX67-NEXT: s_endpgm
;
-; GFX8910-LABEL: s_buffer_load_imm_mergex4:
-; GFX8910: ; %bb.0: ; %main_body
-; GFX8910-NEXT: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x8
-; GFX8910-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8910-NEXT: v_mov_b32_e32 v0, s0
-; GFX8910-NEXT: v_mov_b32_e32 v1, s1
-; GFX8910-NEXT: v_mov_b32_e32 v2, s2
-; GFX8910-NEXT: v_mov_b32_e32 v3, s3
-; GFX8910-NEXT: exp mrt0 v0, v1, v2, v3 done vm
-; GFX8910-NEXT: s_endpgm
+; GFX8-LABEL: s_buffer_load_imm_mergex4:
+; GFX8: ; %bb.0: ; %main_body
+; GFX8-NEXT: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x8
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: exp mrt0 v0, v1, v2, v3 done vm
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: s_buffer_load_imm_mergex4:
+; GFX910: ; %bb.0: ; %main_body
+; GFX910-NEXT: s_buffer_load_dwordx4 s[4:7], s[0:3], 0x8
+; GFX910-NEXT: s_waitcnt lgkmcnt(0)
+; GFX910-NEXT: v_mov_b32_e32 v0, s4
+; GFX910-NEXT: v_mov_b32_e32 v1, s5
+; GFX910-NEXT: v_mov_b32_e32 v2, s6
+; GFX910-NEXT: v_mov_b32_e32 v3, s7
+; GFX910-NEXT: exp mrt0 v0, v1, v2, v3 done vm
+; GFX910-NEXT: s_endpgm
;
; GFX11-LABEL: s_buffer_load_imm_mergex4:
; GFX11: ; %bb.0: ; %main_body
diff --git a/llvm/test/CodeGen/AMDGPU/merge-sbuffer-load.mir b/llvm/test/CodeGen/AMDGPU/merge-sbuffer-load.mir
index 1b2f672..02c1a32 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-sbuffer-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-sbuffer-load.mir
@@ -1,14 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck %s -check-prefixes=CHECK,GFX10
# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck %s -check-prefixes=CHECK,GFX12
-# CHECK-LABEL: name: merge_s_buffer_load_x2
-# CHECK: S_BUFFER_LOAD_DWORDX2_IMM %0, 0, 0 :: (dereferenceable invariant load (s64), align 4)
+---
name: merge_s_buffer_load_x2
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x2
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %3:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s64), align 4)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY %3.sub0
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed %3.sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x2
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s64), align 4)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[S_BUFFER_LOAD_DWORDX2_IMM]].sub0
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX2_IMM]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s32))
@@ -17,15 +34,19 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x1_x2
-# CHECK: S_BUFFER_LOAD_DWORD_IMM %0, 0, 0 :: (dereferenceable invariant load (s32))
-# CHECK: S_BUFFER_LOAD_DWORDX2_IMM %0, 4, 0 :: (dereferenceable invariant load (s64))
name: merge_s_buffer_load_x1_x2
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-LABEL: name: merge_s_buffer_load_x1_x2
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX2_IMM:%[0-9]+]]:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM [[COPY]], 4, 0 :: (dereferenceable invariant load (s64))
+ ; CHECK-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
%2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s64))
@@ -34,16 +55,28 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x2_x1
-# GFX10: S_BUFFER_LOAD_DWORDX2_IMM %0, 0, 0 :: (dereferenceable invariant load (s64))
-# GFX10: S_BUFFER_LOAD_DWORD_IMM %0, 8, 0 :: (dereferenceable invariant load (s32))
-# GFX12: S_BUFFER_LOAD_DWORDX3_IMM %0, 0, 0 :: (dereferenceable invariant load (s96), align 8)
name: merge_s_buffer_load_x2_x1
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x2_x1
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: [[S_BUFFER_LOAD_DWORDX2_IMM:%[0-9]+]]:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s64))
+ ; GFX10-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 8, 0 :: (dereferenceable invariant load (s32))
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x2_x1
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s96), align 8)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY [[S_BUFFER_LOAD_DWORDX3_IMM]].sub0_sub1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX3_IMM]].sub2
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s64))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s32))
@@ -52,14 +85,37 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x4
-# CHECK: S_BUFFER_LOAD_DWORDX4_IMM %0, 0, 0 :: (dereferenceable invariant load (s128), align 4)
name: merge_s_buffer_load_x4
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x4
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s128), align 4)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY %7.sub0_sub1
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY killed %7.sub2_sub3
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY1]].sub0
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY1]].sub1
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY2]].sub0
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY2]].sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x4
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s128), align 4)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_IMM]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX4_IMM]].sub3
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY1]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY1]].sub2
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s32))
@@ -70,15 +126,19 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x1_x3
-# CHECK: S_BUFFER_LOAD_DWORD_IMM %0, 0, 0 :: (dereferenceable invariant load (s32))
-# CHECK: S_BUFFER_LOAD_DWORDX3_IMM %0, 4, 0 :: (dereferenceable invariant load (s96), align 16)
name: merge_s_buffer_load_x1_x3
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-LABEL: name: merge_s_buffer_load_x1_x3
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM [[COPY]], 4, 0 :: (dereferenceable invariant load (s96), align 16)
+ ; CHECK-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
%2:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s96))
@@ -87,14 +147,20 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x3_x1
-# CHECK: S_BUFFER_LOAD_DWORDX4_IMM %0, 0, 0 :: (dereferenceable invariant load (s128))
name: merge_s_buffer_load_x3_x1
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-LABEL: name: merge_s_buffer_load_x3_x1
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s128))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_IMM]].sub0_sub1_sub2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX4_IMM]].sub3
+ ; CHECK-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s96))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 12, 0 :: (dereferenceable invariant load (s32))
@@ -103,14 +169,53 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x8
-# CHECK: S_BUFFER_LOAD_DWORDX8_IMM %0, 0, 0 :: (dereferenceable invariant load (s256), align 4)
name: merge_s_buffer_load_x8
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %15:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 4)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %15.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %15.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY1]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY killed [[COPY1]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY4]].sub0
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY4]].sub1
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_64_xexec = COPY killed [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY9]].sub0
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY9]].sub1
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY10]].sub0
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY10]].sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 4)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_96 = COPY [[COPY1]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY1]].sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec = COPY [[COPY3]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY5]].sub1
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_96 = COPY [[COPY2]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY2]].sub3
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64_xexec = COPY [[COPY9]].sub0_sub1
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY9]].sub2
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY11]].sub0
+ ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY11]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s32))
@@ -125,14 +230,53 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x8_reordered
-# CHECK: S_BUFFER_LOAD_DWORDX8_IMM %0, 0, 0 :: (dereferenceable invariant load (s256), align 4)
name: merge_s_buffer_load_x8_reordered
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_reordered
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %15:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 4)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %15.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %15.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY1]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY killed [[COPY1]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub1
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub0
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_64_xexec = COPY killed [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY7]].sub1
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY7]].sub0
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY4]].sub1
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY4]].sub0
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY8]].sub1
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY8]].sub0
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_reordered
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 4)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_96 = COPY [[COPY1]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY1]].sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec = COPY [[COPY3]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY5]].sub1
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_96 = COPY [[COPY2]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY2]].sub3
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64_xexec = COPY [[COPY9]].sub0_sub1
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY9]].sub2
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY11]].sub1
+ ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY11]].sub0
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 20, 0 :: (dereferenceable invariant load (s32))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s32))
@@ -147,14 +291,37 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x8_out_of_x2
-# CHECK: S_BUFFER_LOAD_DWORDX8_IMM %0, 0, 0 :: (dereferenceable invariant load (s256), align 8)
name: merge_s_buffer_load_x8_out_of_x2
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x2
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %7.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %7.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x2
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s64))
%2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s64))
@@ -165,14 +332,29 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x8_out_of_x4
-# CHECK: S_BUFFER_LOAD_DWORDX8_IMM %0, 0, 0 :: (dereferenceable invariant load (s256), align 16)
name: merge_s_buffer_load_x8_out_of_x4
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x4
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %3:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %3.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %3.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x4
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
%2:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s128))
@@ -181,14 +363,37 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_x8_mixed
-# CHECK: S_BUFFER_LOAD_DWORDX8_IMM %0, 0, 0 :: (dereferenceable invariant load (s256), align 16)
name: merge_s_buffer_load_x8_mixed
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_mixed
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %7.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %7.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_mixed
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub2_sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
@@ -199,14 +404,39 @@ body: |
...
---
-# CHECK-LABEL: name: merge_s_buffer_load_sgpr_imm
-# CHECK: S_BUFFER_LOAD_DWORDX4_SGPR_IMM %0, %1, 0, 0 :: (dereferenceable invariant load (s128), align 4)
name: merge_s_buffer_load_sgpr_imm
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX10-LABEL: name: merge_s_buffer_load_sgpr_imm
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX10-NEXT: early-clobber %8:sgpr_128 = S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec [[COPY]], [[COPY1]], 0, 0 :: (dereferenceable invariant load (s128), align 4)
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY %8.sub0_sub1
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY killed %8.sub2_sub3
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY2]].sub0
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY2]].sub1
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_sgpr_imm
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_SGPR_IMM [[COPY]], [[COPY1]], 0, 0 :: (dereferenceable invariant load (s128), align 4)
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY2]].sub2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY4]].sub0
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY4]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32 = COPY $sgpr4
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM %0:sgpr_128, %1:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
@@ -218,15 +448,21 @@ body: |
...
---
-# CHECK-LABEL: name: no_merge_for_different_soffsets
-# CHECK: S_BUFFER_LOAD_DWORD_SGPR_IMM %0, %1, 4, 0 :: (dereferenceable invariant load (s32))
-# CHECK: S_BUFFER_LOAD_DWORD_SGPR_IMM %0, %2, 8, 0 :: (dereferenceable invariant load (s32))
name: no_merge_for_different_soffsets
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $sgpr5
+ ; CHECK-LABEL: name: no_merge_for_different_soffsets
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[COPY]], [[COPY1]], 4, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[COPY]], [[COPY2]], 8, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32 = COPY $sgpr4
%2:sreg_32 = COPY $sgpr5
@@ -237,15 +473,20 @@ body: |
...
---
-# CHECK-LABEL: name: no_merge_for_non_adjacent_offsets
-# CHECK: S_BUFFER_LOAD_DWORD_SGPR_IMM %0, %1, 4, 0 :: (dereferenceable invariant load (s32))
-# CHECK: S_BUFFER_LOAD_DWORD_SGPR_IMM %0, %1, 12, 0 :: (dereferenceable invariant load (s32))
name: no_merge_for_non_adjacent_offsets
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; CHECK-LABEL: name: no_merge_for_non_adjacent_offsets
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[COPY]], [[COPY1]], 4, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[COPY]], [[COPY1]], 12, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: S_ENDPGM 0
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sreg_32 = COPY $sgpr4
%2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM %0:sgpr_128, %1:sreg_32, 4, 0 :: (dereferenceable invariant load (s32))
@@ -253,4 +494,420 @@ body: |
S_ENDPGM 0
...
+
+# The constrained multi-dword buffer load merge tests.
+
+---
+name: merge_s_buffer_load_x1_x2ec
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_x1_x2ec
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: early-clobber %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec [[COPY]], 4, 0 :: (dereferenceable invariant load (s64))
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
+ early-clobber %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x2ec_x1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x2ec_x1
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s64))
+ ; GFX10-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 8, 0 :: (dereferenceable invariant load (s32))
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x2ec_x1
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s96), align 8)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY [[S_BUFFER_LOAD_DWORDX3_IMM]].sub0_sub1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX3_IMM]].sub2
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s64))
+ %2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s32))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x1_x3ec
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_x1_x3ec
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: early-clobber %2:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM_ec [[COPY]], 4, 0 :: (dereferenceable invariant load (s96), align 16)
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
+ early-clobber %2:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM_ec %0:sgpr_128, 4, 0 :: (dereferenceable invariant load (s96))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x3ec_x1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_x3ec_x1
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s128))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_IMM]].sub0_sub1_sub2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_BUFFER_LOAD_DWORDX4_IMM]].sub3
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_96 = S_BUFFER_LOAD_DWORDX3_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s96))
+ %2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 12, 0 :: (dereferenceable invariant load (s32))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x8_out_of_x2ec_reordered
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x2ec_reordered
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %7.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %7.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x2ec_reordered
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s64))
+ early-clobber %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s64))
+ early-clobber %3:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s64))
+ early-clobber %4:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 24, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
---
+
+name: merge_s_buffer_load_x8_out_of_x2ec_x2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x2ec_x2
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %7.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %7.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x2ec_x2
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 8)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY [[COPY1]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY1]].sub2_sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY [[COPY2]].sub2_sub3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s64))
+ early-clobber %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s64))
+ %3:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s64))
+ %4:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 24, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x8_out_of_x4ec
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x4ec
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %3:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %3.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %3.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x4ec
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
+ early-clobber %2:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s128))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x8_out_of_x4ec_x4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x4ec_x4
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %3:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %3.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %3.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x4ec_x4
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
+ %2:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s128))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x8_out_of_x4_x4ec
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_out_of_x4_x4ec
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %3:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %3.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %3.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_out_of_x4_x4ec
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
+ early-clobber %2:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s128))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x8_mixed_including_ec_opcodes
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_x8_mixed_including_ec_opcodes
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: early-clobber %7:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM_ec [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY %7.sub0_sub1_sub2_sub3
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed %7.sub4_sub5_sub6_sub7
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub2_sub3
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_x8_mixed_including_ec_opcodes
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256), align 16)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[COPY2]].sub0_sub1
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY killed [[COPY2]].sub2_sub3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[COPY3]].sub1
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ early-clobber %1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM_ec %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128))
+ %2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
+ early-clobber %3:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM_ec %0:sgpr_128, 24, 0 :: (dereferenceable invariant load (s64))
+ %4:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0:sgpr_128, 20, 0 :: (dereferenceable invariant load (s32))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_sgpr_imm_x2ec_x2ec
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+
+ ; GFX10-LABEL: name: merge_s_buffer_load_sgpr_imm_x2ec_x2ec
+ ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX10-NEXT: early-clobber %4:sgpr_128 = S_BUFFER_LOAD_DWORDX4_SGPR_IMM_ec [[COPY]], [[COPY1]], 0, 0 :: (dereferenceable invariant load (s128), align 8)
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY %4.sub0_sub1
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY killed %4.sub2_sub3
+ ; GFX10-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: merge_s_buffer_load_sgpr_imm_x2ec_x2ec
+ ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_SGPR_IMM [[COPY]], [[COPY1]], 0, 0 :: (dereferenceable invariant load (s128), align 8)
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub0_sub1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY killed [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub2_sub3
+ ; GFX12-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sreg_32 = COPY $sgpr4
+ early-clobber %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec %0:sgpr_128, %1:sreg_32, 0, 0 :: (dereferenceable invariant load (s64))
+ early-clobber %3:sgpr_64 = S_BUFFER_LOAD_DWORDX2_SGPR_IMM_ec %0:sgpr_128, %1:sreg_32, 8, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
+
+# No constrained opcode is needed when the MEM operand already meets the required alignment.
+
+---
+
+name: merge_s_buffer_load_x2_x2_no_constrained_opc_needed
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_x2_x2_no_constrained_opc_needed
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s128))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY [[S_BUFFER_LOAD_DWORDX4_IMM]].sub0_sub1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY killed [[S_BUFFER_LOAD_DWORDX4_IMM]].sub2_sub3
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s64), align 16)
+ %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_IMM %0:sgpr_128, 8, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_x4_x4_no_constrained_opc_needed
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_x4_x4_no_constrained_opc_needed
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s256))
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[S_BUFFER_LOAD_DWORDX8_IMM]].sub0_sub1_sub2_sub3
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY killed [[S_BUFFER_LOAD_DWORDX8_IMM]].sub4_sub5_sub6_sub7
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 0, 0 :: (dereferenceable invariant load (s128), align 32)
+ %2:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM %0:sgpr_128, 16, 0 :: (dereferenceable invariant load (s128))
+
+ S_ENDPGM 0
+...
+---
+
+name: merge_s_buffer_load_sgpr_imm_x2ec_x2ec_no_constrained_opc_needed
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+
+ ; CHECK-LABEL: name: merge_s_buffer_load_sgpr_imm_x2ec_x2ec_no_constrained_opc_needed
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM:%[0-9]+]]:sgpr_128 = S_BUFFER_LOAD_DWORDX4_SGPR_IMM [[COPY]], [[COPY1]], 0, 0 :: (dereferenceable invariant load (s128))
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub0_sub1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY killed [[S_BUFFER_LOAD_DWORDX4_SGPR_IMM]].sub2_sub3
+ ; CHECK-NEXT: S_ENDPGM 0
+ %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ %1:sreg_32 = COPY $sgpr4
+ %2:sgpr_64 = S_BUFFER_LOAD_DWORDX2_SGPR_IMM %0:sgpr_128, %1:sreg_32, 0, 0 :: (dereferenceable invariant load (s64), align 16)
+ %3:sgpr_64 = S_BUFFER_LOAD_DWORDX2_SGPR_IMM %0:sgpr_128, %1:sreg_32, 8, 0 :: (dereferenceable invariant load (s64))
+
+ S_ENDPGM 0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index e86ee1ad..3a6b048 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -34,9 +34,8 @@ define amdgpu_kernel void @test_simple_indirect_call() {
; ATTRIBUTOR_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
; ATTRIBUTOR_GCN-SAME: () #[[ATTR1:[0-9]+]] {
; ATTRIBUTOR_GCN-NEXT: [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
-; ATTRIBUTOR_GCN-NEXT: [[FPTR_CAST:%.*]] = addrspacecast ptr addrspace(5) [[FPTR]] to ptr
-; ATTRIBUTOR_GCN-NEXT: store ptr @indirect, ptr [[FPTR_CAST]], align 8
-; ATTRIBUTOR_GCN-NEXT: [[FP:%.*]] = load ptr, ptr [[FPTR_CAST]], align 8
+; ATTRIBUTOR_GCN-NEXT: store ptr @indirect, ptr addrspace(5) [[FPTR]], align 8
+; ATTRIBUTOR_GCN-NEXT: [[FP:%.*]] = load ptr, ptr addrspace(5) [[FPTR]], align 8
; ATTRIBUTOR_GCN-NEXT: call void [[FP]]()
; ATTRIBUTOR_GCN-NEXT: ret void
;
@@ -75,12 +74,16 @@ define amdgpu_kernel void @test_simple_indirect_call() {
ret void
}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
;.
; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
-
-!llvm.module.flags = !{!0}
-!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}
+; AKF_GCN: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
+;.
+; ATTRIBUTOR_GCN: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
+;.
diff --git a/llvm/test/CodeGen/ARM/O3-pipeline.ll b/llvm/test/CodeGen/ARM/O3-pipeline.ll
index aa92c7a..e74e4f2 100644
--- a/llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ b/llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -38,7 +38,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/ARM/setjmp-bti-basic.ll b/llvm/test/CodeGen/ARM/setjmp-bti-basic.ll
index 3b01e3e..7fe7015 100644
--- a/llvm/test/CodeGen/ARM/setjmp-bti-basic.ll
+++ b/llvm/test/CodeGen/ARM/setjmp-bti-basic.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi < %s | FileCheck %s --check-prefix=BTI
; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+no-bti-at-return-twice < %s | \
; RUN: FileCheck %s --check-prefix=NOBTI
@@ -20,11 +21,43 @@
define i32 @foo(i32 %x) "branch-target-enforcement" {
; BTI-LABEL: foo:
-; BTI: bl setjmp
-; BTI-NEXT: bti
+; BTI: @ %bb.0: @ %entry
+; BTI-NEXT: bti
+; BTI-NEXT: .save {r4, lr}
+; BTI-NEXT: push {r4, lr}
+; BTI-NEXT: mov r4, r0
+; BTI-NEXT: movw r0, :lower16:buf
+; BTI-NEXT: movt r0, :upper16:buf
+; BTI-NEXT: bl setjmp
+; BTI-NEXT: bti
+; BTI-NEXT: cmp r0, #0
+; BTI-NEXT: itt ne
+; BTI-NEXT: movne r0, #0
+; BTI-NEXT: popne {r4, pc}
+; BTI-NEXT: .LBB0_1: @ %if.else
+; BTI-NEXT: mov r0, r4
+; BTI-NEXT: bl bar
+; BTI-NEXT: mov r0, r4
+; BTI-NEXT: pop {r4, pc}
+;
; NOBTI-LABEL: foo:
-; NOBTI: bl setjmp
-; NOBTI-NOT: bti
+; NOBTI: @ %bb.0: @ %entry
+; NOBTI-NEXT: bti
+; NOBTI-NEXT: .save {r4, lr}
+; NOBTI-NEXT: push {r4, lr}
+; NOBTI-NEXT: mov r4, r0
+; NOBTI-NEXT: movw r0, :lower16:buf
+; NOBTI-NEXT: movt r0, :upper16:buf
+; NOBTI-NEXT: bl setjmp
+; NOBTI-NEXT: cmp r0, #0
+; NOBTI-NEXT: itt ne
+; NOBTI-NEXT: movne r0, #0
+; NOBTI-NEXT: popne {r4, pc}
+; NOBTI-NEXT: .LBB0_1: @ %if.else
+; NOBTI-NEXT: mov r0, r4
+; NOBTI-NEXT: bl bar
+; NOBTI-NEXT: mov r0, r4
+; NOBTI-NEXT: pop {r4, pc}
entry:
%call = call i32 @setjmp(ptr @buf) #0
@@ -40,6 +73,41 @@ if.end: ; preds = %entry, %if.else
ret i32 %x.addr.0
}
+;; Check that the BL to setjmp correctly clobbers LR
+
+define i32 @baz() "branch-target-enforcement" {
+; BTI-LABEL: baz:
+; BTI: @ %bb.0: @ %entry
+; BTI-NEXT: bti
+; BTI-NEXT: .save {r7, lr}
+; BTI-NEXT: push {r7, lr}
+; BTI-NEXT: .pad #160
+; BTI-NEXT: sub sp, #160
+; BTI-NEXT: mov r0, sp
+; BTI-NEXT: bl setjmp
+; BTI-NEXT: bti
+; BTI-NEXT: movs r0, #0
+; BTI-NEXT: add sp, #160
+; BTI-NEXT: pop {r7, pc}
+;
+; NOBTI-LABEL: baz:
+; NOBTI: @ %bb.0: @ %entry
+; NOBTI-NEXT: bti
+; NOBTI-NEXT: .save {r7, lr}
+; NOBTI-NEXT: push {r7, lr}
+; NOBTI-NEXT: .pad #160
+; NOBTI-NEXT: sub sp, #160
+; NOBTI-NEXT: mov r0, sp
+; NOBTI-NEXT: bl setjmp
+; NOBTI-NEXT: movs r0, #0
+; NOBTI-NEXT: add sp, #160
+; NOBTI-NEXT: pop {r7, pc}
+entry:
+ %outgoing_jb = alloca [20 x i64], align 8
+ %call = call i32 @setjmp(ptr %outgoing_jb) returns_twice
+ ret i32 0
+}
+
declare void @bar(i32)
declare i32 @setjmp(ptr) #0
diff --git a/llvm/test/CodeGen/BPF/objdump_atomics.ll b/llvm/test/CodeGen/BPF/objdump_atomics.ll
index 3ec364f..c4cb16b 100644
--- a/llvm/test/CodeGen/BPF/objdump_atomics.ll
+++ b/llvm/test/CodeGen/BPF/objdump_atomics.ll
@@ -2,7 +2,7 @@
; CHECK-LABEL: test_load_add_32
; CHECK: c3 21
-; CHECK: r2 = atomic_fetch_add((u32 *)(r1 + 0), r2)
+; CHECK: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
define void @test_load_add_32(ptr %p, i32 zeroext %v) {
entry:
atomicrmw add ptr %p, i32 %v seq_cst
diff --git a/llvm/test/CodeGen/BPF/objdump_cond_op.ll b/llvm/test/CodeGen/BPF/objdump_cond_op.ll
index 3b2e6c1..c64a0f2 100644
--- a/llvm/test/CodeGen/BPF/objdump_cond_op.ll
+++ b/llvm/test/CodeGen/BPF/objdump_cond_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=bpfel -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex -d - | FileCheck %s
+; RUN: llc -mtriple=bpfel -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex --mcpu=v1 -d - | FileCheck %s
; Source Code:
; int gbl;
diff --git a/llvm/test/CodeGen/BPF/objdump_imm_hex.ll b/llvm/test/CodeGen/BPF/objdump_imm_hex.ll
index 1760bb6..38b93e8 100644
--- a/llvm/test/CodeGen/BPF/objdump_imm_hex.ll
+++ b/llvm/test/CodeGen/BPF/objdump_imm_hex.ll
@@ -53,8 +53,8 @@ define i32 @test(i64, i64) local_unnamed_addr #0 {
%14 = phi i32 [ %12, %10 ], [ %7, %4 ]
%15 = phi i32 [ 2, %10 ], [ 1, %4 ]
store i32 %14, ptr @gbl, align 4
-; CHECK-DEC: 63 12 00 00 00 00 00 00 *(u32 *)(r2 + 0) = r1
-; CHECK-HEX: 63 12 00 00 00 00 00 00 *(u32 *)(r2 + 0x0) = r1
+; CHECK-DEC: 63 12 00 00 00 00 00 00 *(u32 *)(r2 + 0) = w1
+; CHECK-HEX: 63 12 00 00 00 00 00 00 *(u32 *)(r2 + 0x0) = w1
br label %16
; <label>:16: ; preds = %13, %8
diff --git a/llvm/test/CodeGen/BPF/objdump_static_var.ll b/llvm/test/CodeGen/BPF/objdump_static_var.ll
index a91074e..b743d82 100644
--- a/llvm/test/CodeGen/BPF/objdump_static_var.ll
+++ b/llvm/test/CodeGen/BPF/objdump_static_var.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=bpfel -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex -d - | FileCheck --check-prefix=CHECK %s
-; RUN: llc -mtriple=bpfeb -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex -d - | FileCheck --check-prefix=CHECK %s
+; RUN: llc -mtriple=bpfel -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex --mcpu=v1 -d - | FileCheck --check-prefix=CHECK %s
+; RUN: llc -mtriple=bpfeb -filetype=obj -o - %s | llvm-objdump --no-print-imm-hex --mcpu=v1 -d - | FileCheck --check-prefix=CHECK %s
; src:
; static volatile long a = 2;
diff --git a/llvm/test/CodeGen/LoongArch/O0-pipeline.ll b/llvm/test/CodeGen/LoongArch/O0-pipeline.ll
index 138f0c8..38c1dbc 100644
--- a/llvm/test/CodeGen/LoongArch/O0-pipeline.ll
+++ b/llvm/test/CodeGen/LoongArch/O0-pipeline.ll
@@ -26,7 +26,6 @@
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/LoongArch/opt-pipeline.ll b/llvm/test/CodeGen/LoongArch/opt-pipeline.ll
index c5c5342..391888a 100644
--- a/llvm/test/CodeGen/LoongArch/opt-pipeline.ll
+++ b/llvm/test/CodeGen/LoongArch/opt-pipeline.ll
@@ -61,7 +61,6 @@
; LAXX-NEXT: Constant Hoisting
; LAXX-NEXT: Replace intrinsics with calls to vector library
; LAXX-NEXT: Partially inline calls to library functions
-; LAXX-NEXT: Expand vector predication intrinsics
; LAXX-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; LAXX-NEXT: Scalarize Masked Memory Intrinsics
; LAXX-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/M68k/pipeline.ll b/llvm/test/CodeGen/M68k/pipeline.ll
index 0481d5c..6aa66d0 100644
--- a/llvm/test/CodeGen/M68k/pipeline.ll
+++ b/llvm/test/CodeGen/M68k/pipeline.ll
@@ -32,7 +32,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/and-srl.ll b/llvm/test/CodeGen/Mips/llvm-ir/and-srl.ll
new file mode 100644
index 0000000..988a0f5
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/and-srl.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=MIPS4
+; RUN: llc < %s -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=MIPS64R2
+
+define i64 @foo(i64 noundef %a) {
+; MIPS4-LABEL: foo:
+; MIPS4: # %bb.0: # %entry
+; MIPS4-NEXT: sll $1, $4, 0
+; MIPS4-NEXT: srl $1, $1, 2
+; MIPS4-NEXT: andi $1, $1, 7
+; MIPS4-NEXT: daddiu $2, $zero, 1
+; MIPS4-NEXT: jr $ra
+; MIPS4-NEXT: dsllv $2, $2, $1
+;
+; MIPS64R2-LABEL: foo:
+; MIPS64R2: # %bb.0: # %entry
+; MIPS64R2-NEXT: sll $1, $4, 0
+; MIPS64R2-NEXT: ext $1, $1, 2, 3
+; MIPS64R2-NEXT: daddiu $2, $zero, 1
+; MIPS64R2-NEXT: jr $ra
+; MIPS64R2-NEXT: dsllv $2, $2, $1
+entry:
+ %div1 = lshr i64 %a, 2
+ %and = and i64 %div1, 7
+ %shl = shl nuw nsw i64 1, %and
+ ret i64 %shl
+}
diff --git a/llvm/test/CodeGen/NVPTX/fence-proxy-tensormap.ll b/llvm/test/CodeGen/NVPTX/fence-proxy-tensormap.ll
new file mode 100644
index 0000000..83a2ca4
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fence-proxy-tensormap.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx83 | FileCheck --check-prefixes=CHECK %s
+; RUN: %if ptxas-12.3 %{ llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx83 | %ptxas-verify -arch=sm_90 %}
+
+; CHECK-LABEL: test_fence_proxy_tensormap_generic_release
+define void @test_fence_proxy_tensormap_generic_release() {
+ ; CHECK: fence.proxy.tensormap::generic.release.cta;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.release.cta();
+
+ ; CHECK: fence.proxy.tensormap::generic.release.cluster;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.release.cluster();
+
+ ; CHECK: fence.proxy.tensormap::generic.release.gpu;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.release.gpu();
+
+ ; CHECK: fence.proxy.tensormap::generic.release.sys;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.release.sys();
+
+ ret void
+}
+
+; CHECK-LABEL: test_fence_proxy_tensormap_generic_acquire
+define void @test_fence_proxy_tensormap_generic_acquire(ptr addrspace(0) %addr) {
+ ; CHECK: fence.proxy.tensormap::generic.acquire.cta [%rd{{[0-9]+}}], 128;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cta(ptr addrspace(0) %addr, i32 128);
+
+ ; CHECK: fence.proxy.tensormap::generic.acquire.cluster [%rd{{[0-9]+}}], 128;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cluster(ptr addrspace(0) %addr, i32 128);
+
+ ; CHECK: fence.proxy.tensormap::generic.acquire.gpu [%rd{{[0-9]+}}], 128;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.gpu(ptr addrspace(0) %addr, i32 128);
+
+ ; CHECK: fence.proxy.tensormap::generic.acquire.sys [%rd{{[0-9]+}}], 128;
+ call void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.sys(ptr addrspace(0) %addr, i32 128);
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll b/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
index 68915b0..9cea33d 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
@@ -1,169 +1,7 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | FileCheck %s
; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | %ptxas-verify -arch=sm_70 %}
-; CHECK-LABEL: generic_plain
-define void @generic_plain(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr {
- ; CHECK: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load i8, ptr %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i8 %a.add, ptr %a
-
- ; CHECK: ld.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load i16, ptr %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i16 %b.add, ptr %b
-
- ; CHECK: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load i32, ptr %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store i32 %c.add, ptr %c
-
- ; CHECK: ld.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load i64, ptr %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store i64 %d.add, ptr %d
-
- ; CHECK: ld.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load float, ptr %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store float %e.add, ptr %c
-
- ; CHECK: ld.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load double, ptr %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store double %f.add, ptr %c
-
- ret void
-}
-
-; CHECK-LABEL: generic_volatile
-define void @generic_volatile(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr {
- ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load volatile i8, ptr %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i8 %a.add, ptr %a
-
- ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load volatile i16, ptr %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i16 %b.add, ptr %b
-
- ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load volatile i32, ptr %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store volatile i32 %c.add, ptr %c
-
- ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load volatile i64, ptr %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store volatile i64 %d.add, ptr %d
-
- ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load volatile float, ptr %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store volatile float %e.add, ptr %c
-
- ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load volatile double, ptr %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store volatile double %f.add, ptr %c
-
- ret void
-}
-
-; CHECK-LABEL: generic_unordered
-define void @generic_unordered(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr %a unordered, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr %a unordered, align 1
-
- ; CHECK: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr %b unordered, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr %b unordered, align 2
-
- ; CHECK: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr %c unordered, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr %c unordered, align 4
-
- ; CHECK: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr %d unordered, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr %d unordered, align 8
-
- ; CHECK: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr %e unordered, align 4
-
- ; CHECK: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr %e unordered, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr %e unordered, align 8
-
- ret void
-}
-
-; CHECK-LABEL: generic_monotonic
-define void @generic_monotonic(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr %a monotonic, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr %a monotonic, align 1
-
- ; CHECK: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr %b monotonic, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr %b monotonic, align 2
-
- ; CHECK: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr %c monotonic, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr %c monotonic, align 4
-
- ; CHECK: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr %d monotonic, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr %d monotonic, align 8
-
- ; CHECK: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr %e monotonic, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr %e monotonic, align 4
-
- ; CHECK: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr %e monotonic, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr %e monotonic, align 8
-
- ret void
-}
+;; generic statespace
; CHECK-LABEL: generic_acq_rel
define void @generic_acq_rel(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
@@ -206,335 +44,154 @@ define void @generic_acq_rel(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnam
ret void
}
-; CHECK-LABEL: generic_unordered_volatile
-define void @generic_unordered_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr %a unordered, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr %a unordered, align 1
-
- ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr %b unordered, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr %b unordered, align 2
-
- ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr %c unordered, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr %c unordered, align 4
-
- ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr %d unordered, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr %d unordered, align 8
-
- ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr %e unordered, align 4
-
- ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr %e unordered, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr %e unordered, align 8
-
- ret void
-}
-
-; CHECK-LABEL: generic_monotonic_volatile
-define void @generic_monotonic_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr %a monotonic, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr %a monotonic, align 1
-
- ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr %b monotonic, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr %b monotonic, align 2
-
- ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr %c monotonic, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr %c monotonic, align 4
-
- ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr %d monotonic, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr %d monotonic, align 8
-
- ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr %e monotonic, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr %e monotonic, align 4
-
- ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr %e monotonic, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr %e monotonic, align 8
-
- ret void
-}
-
-;; global statespace
-
-; CHECK-LABEL: global_plain
-define void @global_plain(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d) local_unnamed_addr {
- ; CHECK: ld.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load i8, ptr addrspace(1) %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i8 %a.add, ptr addrspace(1) %a
-
- ; CHECK: ld.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load i16, ptr addrspace(1) %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i16 %b.add, ptr addrspace(1) %b
-
- ; CHECK: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load i32, ptr addrspace(1) %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store i32 %c.add, ptr addrspace(1) %c
-
- ; CHECK: ld.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load i64, ptr addrspace(1) %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store i64 %d.add, ptr addrspace(1) %d
-
- ; CHECK: ld.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load float, ptr addrspace(1) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store float %e.add, ptr addrspace(1) %c
-
- ; CHECK: ld.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load double, ptr addrspace(1) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store double %f.add, ptr addrspace(1) %c
-
- ret void
-}
-
-; CHECK-LABEL: global_volatile
-define void @global_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d) local_unnamed_addr {
- ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load volatile i8, ptr addrspace(1) %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i8 %a.add, ptr addrspace(1) %a
-
- ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load volatile i16, ptr addrspace(1) %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i16 %b.add, ptr addrspace(1) %b
-
- ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load volatile i32, ptr addrspace(1) %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store volatile i32 %c.add, ptr addrspace(1) %c
-
- ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load volatile i64, ptr addrspace(1) %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store volatile i64 %d.add, ptr addrspace(1) %d
-
- ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load volatile float, ptr addrspace(1) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store volatile float %e.add, ptr addrspace(1) %c
-
- ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load volatile double, ptr addrspace(1) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store volatile double %f.add, ptr addrspace(1) %c
-
- ret void
-}
-
-; CHECK-LABEL: global_unordered
-define void @global_unordered(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(1) %a unordered, align 1
+; CHECK-LABEL: generic_acq_rel_volatile
+define void @generic_acq_rel_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a acquire, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(1) %a unordered, align 1
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a release, align 1
- ; CHECK: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(1) %b unordered, align 2
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b acquire, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(1) %b unordered, align 2
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b release, align 2
- ; CHECK: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(1) %c unordered, align 4
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c acquire, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(1) %c unordered, align 4
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c release, align 4
- ; CHECK: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(1) %d unordered, align 8
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d acquire, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(1) %d unordered, align 8
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d release, align 8
- ; CHECK: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(1) %e unordered, align 4
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e acquire, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(1) %e unordered, align 4
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e release, align 4
- ; CHECK: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(1) %e unordered, align 8
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e acquire, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(1) %e unordered, align 8
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e release, align 8
ret void
}
-; CHECK-LABEL: global_monotonic
-define void @global_monotonic(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(1) %a monotonic, align 1
+; CHECK-LABEL: generic_sc
+define void @generic_sc(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(1) %a monotonic, align 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a seq_cst, align 1
- ; CHECK: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(1) %b monotonic, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(1) %b monotonic, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b seq_cst, align 2
- ; CHECK: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(1) %c monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(1) %c monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c seq_cst, align 4
- ; CHECK: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(1) %d monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(1) %d monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d seq_cst, align 8
- ; CHECK: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(1) %e monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(1) %e monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e seq_cst, align 4
- ; CHECK: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(1) %e monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(1) %e monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: global_unordered_volatile
-define void @global_unordered_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(1) %a unordered, align 1
+; CHECK-LABEL: generic_sc_volatile
+define void @generic_sc_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(1) %a unordered, align 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a seq_cst, align 1
- ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(1) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(1) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b seq_cst, align 2
- ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(1) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(1) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c seq_cst, align 4
- ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(1) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(1) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d seq_cst, align 8
- ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(1) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(1) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e seq_cst, align 4
- ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(1) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(1) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: global_monotonic_volatile
-define void @global_monotonic_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(1) %a monotonic, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(1) %a monotonic, align 1
-
- ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(1) %b monotonic, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(1) %b monotonic, align 2
-
- ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(1) %c monotonic, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(1) %c monotonic, align 4
-
- ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(1) %d monotonic, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(1) %d monotonic, align 8
-
- ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(1) %e monotonic, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(1) %e monotonic, align 4
-
- ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(1) %e monotonic, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(1) %e monotonic, align 8
-
- ret void
-}
+;; global statespace
; CHECK-LABEL: global_acq_rel
define void @global_acq_rel(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
@@ -618,253 +275,113 @@ define void @global_acq_rel_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, p
ret void
}
-;; shared statespace
-
-; CHECK-LABEL: shared_plain
-define void @shared_plain(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d) local_unnamed_addr {
- ; CHECK: ld.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load i8, ptr addrspace(3) %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i8 %a.add, ptr addrspace(3) %a
-
- ; CHECK: ld.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load i16, ptr addrspace(3) %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i16 %b.add, ptr addrspace(3) %b
-
- ; CHECK: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load i32, ptr addrspace(3) %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store i32 %c.add, ptr addrspace(3) %c
-
- ; CHECK: ld.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load i64, ptr addrspace(3) %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store i64 %d.add, ptr addrspace(3) %d
-
- ; CHECK: ld.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load float, ptr addrspace(3) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store float %e.add, ptr addrspace(3) %c
-
- ; CHECK: ld.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load double, ptr addrspace(3) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store double %f.add, ptr addrspace(3) %c
-
- ret void
-}
-
-; CHECK-LABEL: shared_volatile
-define void @shared_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d) local_unnamed_addr {
- ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load volatile i8, ptr addrspace(3) %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i8 %a.add, ptr addrspace(3) %a
-
- ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load volatile i16, ptr addrspace(3) %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i16 %b.add, ptr addrspace(3) %b
-
- ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load volatile i32, ptr addrspace(3) %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store volatile i32 %c.add, ptr addrspace(3) %c
-
- ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load volatile i64, ptr addrspace(3) %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store volatile i64 %d.add, ptr addrspace(3) %d
-
- ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load volatile float, ptr addrspace(3) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store volatile float %e.add, ptr addrspace(3) %c
-
- ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load volatile double, ptr addrspace(3) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store volatile double %f.add, ptr addrspace(3) %c
-
- ret void
-}
-
-; CHECK-LABEL: shared_unordered
-define void @shared_unordered(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(3) %a unordered, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(3) %a unordered, align 1
-
- ; CHECK: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(3) %b unordered, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(3) %b unordered, align 2
-
- ; CHECK: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(3) %c unordered, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(3) %c unordered, align 4
-
- ; CHECK: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(3) %d unordered, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(3) %d unordered, align 8
-
- ; CHECK: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(3) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(3) %e unordered, align 4
-
- ; CHECK: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(3) %e unordered, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(3) %e unordered, align 8
-
- ret void
-}
-
-; CHECK-LABEL: shared_unordered_volatile
-define void @shared_unordered_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(3) %a unordered, align 1
+; CHECK-LABEL: global_seq_cst
+define void @global_seq_cst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(3) %a unordered, align 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a seq_cst, align 1
- ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(3) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(3) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b seq_cst, align 2
- ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(3) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(3) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c seq_cst, align 4
- ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(3) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(3) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d seq_cst, align 8
- ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(3) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(3) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e seq_cst, align 4
- ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(3) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(3) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: shared_monotonic
-define void @shared_monotonic(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(3) %a monotonic, align 1
+; CHECK-LABEL: global_seq_cst_volatile
+define void @global_seq_cst_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(3) %a monotonic, align 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a seq_cst, align 1
- ; CHECK: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(3) %b monotonic, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(3) %b monotonic, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b seq_cst, align 2
- ; CHECK: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(3) %c monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(3) %c monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c seq_cst, align 4
- ; CHECK: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(3) %d monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(3) %d monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d seq_cst, align 8
- ; CHECK: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(3) %e monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(3) %e monotonic, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e seq_cst, align 4
- ; CHECK: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(3) %e monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(3) %e monotonic, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: shared_monotonic_volatile
-define void @shared_monotonic_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(3) %a monotonic, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(3) %a monotonic, align 1
-
- ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(3) %b monotonic, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(3) %b monotonic, align 2
-
- ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(3) %c monotonic, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(3) %c monotonic, align 4
-
- ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(3) %d monotonic, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(3) %d monotonic, align 8
-
- ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(3) %e monotonic, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(3) %e monotonic, align 4
-
- ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(3) %e monotonic, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(3) %e monotonic, align 8
-
- ret void
-}
+;; shared statespace
; CHECK-LABEL: shared_acq_rel
define void @shared_acq_rel(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
@@ -948,332 +465,291 @@ define void @shared_acq_rel_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, p
ret void
}
-;; local statespace
-
-; CHECK-LABEL: local_plain
-define void @local_plain(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
- ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load i8, ptr addrspace(5) %a
- %a.add = add i8 %a.load, 1
- ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i8 %a.add, ptr addrspace(5) %a
-
- ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load i16, ptr addrspace(5) %b
- %b.add = add i16 %b.load, 1
- ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store i16 %b.add, ptr addrspace(5) %b
-
- ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load i32, ptr addrspace(5) %c
- %c.add = add i32 %c.load, 1
- ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store i32 %c.add, ptr addrspace(5) %c
-
- ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load i64, ptr addrspace(5) %d
- %d.add = add i64 %d.load, 1
- ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store i64 %d.add, ptr addrspace(5) %d
-
- ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load float, ptr addrspace(5) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store float %e.add, ptr addrspace(5) %c
-
- ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load double, ptr addrspace(5) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store double %f.add, ptr addrspace(5) %c
-
- ret void
-}
-
-; CHECK-LABEL: local_volatile
-define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
- ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load volatile i8, ptr addrspace(5) %a
+; CHECK-LABEL: shared_seq_cst
+define void @shared_seq_cst(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i8 %a.add, ptr addrspace(5) %a
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a seq_cst, align 1
- ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load volatile i16, ptr addrspace(5) %b
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store volatile i16 %b.add, ptr addrspace(5) %b
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b seq_cst, align 2
- ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load volatile i32, ptr addrspace(5) %c
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store volatile i32 %c.add, ptr addrspace(5) %c
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c seq_cst, align 4
- ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load volatile i64, ptr addrspace(5) %d
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store volatile i64 %d.add, ptr addrspace(5) %d
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d seq_cst, align 8
- ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load volatile float, ptr addrspace(5) %c
- %e.add = fadd float %e.load, 1.
- ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store volatile float %e.add, ptr addrspace(5) %c
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e seq_cst, align 4
+ %e.add = fadd float %e.load, 1.0
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e seq_cst, align 4
- ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load volatile double, ptr addrspace(5) %c
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store volatile double %f.add, ptr addrspace(5) %c
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: local_unordered
-define void @local_unordered(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(5) %a unordered, align 1
+; CHECK-LABEL: shared_seq_cst_volatile
+define void @shared_seq_cst_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(5) %a unordered, align 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a seq_cst, align 1
- ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(5) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(5) %b unordered, align 2
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b seq_cst, align 2
- ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(5) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(5) %c unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c seq_cst, align 4
- ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(5) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(5) %d unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d seq_cst, align 8
- ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(5) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(5) %e unordered, align 4
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e seq_cst, align 4
- ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(5) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(5) %e unordered, align 8
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: local_unordered_volatile
-define void @local_unordered_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(5) %a unordered, align 1
- %a.add = add i8 %a.load, 1
- ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(5) %a unordered, align 1
-
- ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(5) %b unordered, align 2
- %b.add = add i16 %b.load, 1
- ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(5) %b unordered, align 2
-
- ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(5) %c unordered, align 4
- %c.add = add i32 %c.load, 1
- ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(5) %c unordered, align 4
-
- ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(5) %d unordered, align 8
- %d.add = add i64 %d.load, 1
- ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(5) %d unordered, align 8
-
- ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(5) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
- ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(5) %e unordered, align 4
-
- ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(5) %e unordered, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(5) %e unordered, align 8
+;; local statespace
- ret void
-}
+; CHECK-LABEL: local_acq_rel
+define void @local_acq_rel(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using PTX atomic operations.
-; CHECK-LABEL: local_monotonic
-define void @local_monotonic(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(5) %a monotonic, align 1
+ %a.load = load atomic i8, ptr addrspace(5) %a acquire, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(5) %a monotonic, align 1
+ store atomic i8 %a.add, ptr addrspace(5) %a release, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(5) %b monotonic, align 2
+ %b.load = load atomic i16, ptr addrspace(5) %b acquire, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(5) %b monotonic, align 2
+ store atomic i16 %b.add, ptr addrspace(5) %b release, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(5) %c monotonic, align 4
+ %c.load = load atomic i32, ptr addrspace(5) %c acquire, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(5) %c monotonic, align 4
+ store atomic i32 %c.add, ptr addrspace(5) %c release, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(5) %d monotonic, align 8
+ %d.load = load atomic i64, ptr addrspace(5) %d acquire, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(5) %d monotonic, align 8
+ store atomic i64 %d.add, ptr addrspace(5) %d release, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(5) %e monotonic, align 4
+ %e.load = load atomic float, ptr addrspace(5) %e acquire, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(5) %e monotonic, align 4
+ store atomic float %e.add, ptr addrspace(5) %e release, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(5) %e monotonic, align 8
+ %f.load = load atomic double, ptr addrspace(5) %e acquire, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(5) %e monotonic, align 8
+ store atomic double %f.add, ptr addrspace(5) %e release, align 8
ret void
}
-; CHECK-LABEL: local_monotonic_volatile
-define void @local_monotonic_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+; CHECK-LABEL: local_acq_rel_volatile
+define void @local_acq_rel_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using PTX atomic operations.
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(5) %a monotonic, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a acquire, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(5) %a monotonic, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a release, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(5) %b monotonic, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b acquire, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(5) %b monotonic, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b release, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(5) %c monotonic, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c acquire, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(5) %c monotonic, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c release, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(5) %d monotonic, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d acquire, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(5) %d monotonic, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d release, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(5) %e monotonic, align 4
+ %e.load = load atomic volatile float, ptr addrspace(5) %e acquire, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(5) %e monotonic, align 4
+ store atomic volatile float %e.add, ptr addrspace(5) %e release, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(5) %e monotonic, align 8
+ %f.load = load atomic volatile double, ptr addrspace(5) %e acquire, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(5) %e monotonic, align 8
+ store atomic volatile double %f.add, ptr addrspace(5) %e release, align 8
ret void
}
-; CHECK-LABEL: local_acq_rel
-define void @local_acq_rel(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+; CHECK-LABEL: local_seq_cst
+define void @local_seq_cst(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using PTX atomic operations.
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(5) %a acquire, align 1
+ %a.load = load atomic i8, ptr addrspace(5) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(5) %a release, align 1
+ store atomic i8 %a.add, ptr addrspace(5) %a seq_cst, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(5) %b acquire, align 2
+ %b.load = load atomic i16, ptr addrspace(5) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(5) %b release, align 2
+ store atomic i16 %b.add, ptr addrspace(5) %b seq_cst, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(5) %c acquire, align 4
+ %c.load = load atomic i32, ptr addrspace(5) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(5) %c release, align 4
+ store atomic i32 %c.add, ptr addrspace(5) %c seq_cst, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(5) %d acquire, align 8
+ %d.load = load atomic i64, ptr addrspace(5) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(5) %d release, align 8
+ store atomic i64 %d.add, ptr addrspace(5) %d seq_cst, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(5) %e acquire, align 4
+ %e.load = load atomic float, ptr addrspace(5) %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(5) %e release, align 4
+ store atomic float %e.add, ptr addrspace(5) %e seq_cst, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(5) %e acquire, align 8
+ %f.load = load atomic double, ptr addrspace(5) %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(5) %e release, align 8
+ store atomic double %f.add, ptr addrspace(5) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: local_acq_rel_volatile
-define void @local_acq_rel_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+; CHECK-LABEL: local_seq_cst_volatile
+define void @local_seq_cst_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using PTX atomic operations.
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(5) %a acquire, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(5) %a release, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a seq_cst, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(5) %b acquire, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b seq_cst, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(5) %b release, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b seq_cst, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(5) %c acquire, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c seq_cst, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(5) %c release, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c seq_cst, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(5) %d acquire, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d seq_cst, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(5) %d release, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d seq_cst, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(5) %e acquire, align 4
+ %e.load = load atomic volatile float, ptr addrspace(5) %e seq_cst, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(5) %e release, align 4
+ store atomic volatile float %e.add, ptr addrspace(5) %e seq_cst, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(5) %e acquire, align 8
+ %f.load = load atomic volatile double, ptr addrspace(5) %e seq_cst, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(5) %e release, align 8
+ store atomic volatile double %f.add, ptr addrspace(5) %e seq_cst, align 8
+
+ ; TODO: LLVM IR Verifier does not support atomics on vector types.
ret void
}
+
+; TODO: add plain,atomic,volatile,atomic volatile tests
+; for .const and .param statespaces \ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/load-store.ll b/llvm/test/CodeGen/NVPTX/load-store.ll
index 4c5e092..aac73f7 100644
--- a/llvm/test/CodeGen/NVPTX/load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store.ll
@@ -1,5 +1,13 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck -check-prefixes=CHECK,SM60 %s
; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | FileCheck %s -check-prefixes=CHECK,SM70
+; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | %ptxas-verify -arch=sm_70 %}
+
+; TODO: add i1, <8 x i8>, and <6 x i8> vector tests.
+
+; TODO: add test for vectors that exceed 128-bit length
+; Per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
+; vectors cannot exceed 128-bit in length, i.e., .v4.u64 is not allowed.
; generic statespace
@@ -36,10 +44,76 @@ define void @generic_plain(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr {
store float %e.add, ptr %c
; CHECK: ld.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load double, ptr %c
+ %f.load = load double, ptr %d
%f.add = fadd double %f.load, 1.
; CHECK: st.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store double %f.add, ptr %c
+ store double %f.add, ptr %d
+
+ ; TODO: make the lowering of these weak vector ops consistent with
+ ; the ones of the next tests. This test lowers to a weak PTX
+ ; vector op, but the next test lowers to a scalar PTX op.
+ ; CHECK: ld.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load <2 x i8>, ptr %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <2 x i8> %h.add, ptr %b
+
+ ; TODO: make the lowering of these weak vector ops consistent with
+ ; the ones of the previous test. This test lowers to a weak
+ ; PTX scalar op, but the prior test lowers to a vector PTX op.
+ ; CHECK: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load <4 x i8>, ptr %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <4 x i8> %i.add, ptr %c
+
+ ; CHECK: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load <2 x i16>, ptr %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <2 x i16> %j.add, ptr %c
+
+ ; CHECK: ld.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load <4 x i16>, ptr %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <4 x i16> %k.add, ptr %d
+
+ ; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load <2 x i32>, ptr %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <2 x i32> %l.add, ptr %d
+
+ ; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load <4 x i32>, ptr %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <4 x i32> %m.add, ptr %d
+
+ ; CHECK: ld.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load <2 x i64>, ptr %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store <2 x i64> %n.add, ptr %d
+
+ ; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load <2 x float>, ptr %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <2 x float> %o.add, ptr %d
+
+ ; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load <4 x float>, ptr %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <4 x float> %p.add, ptr %d
+
+ ; CHECK: ld.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load <2 x double>, ptr %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store <2 x double> %q.add, ptr %d
ret void
}
@@ -82,45 +156,136 @@ define void @generic_volatile(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr
; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store volatile double %f.add, ptr %c
+ ; TODO: volatile, atomic, and volatile atomic memory operations on vector types.
+ ; Currently, LLVM:
+ ; - does not allow atomic operations on vectors.
+ ; - it allows volatile operations, but it is not clear what those mean.
+ ; Both of the following semantics make sense in general, and PTX supports both:
+ ; - volatile/atomic/volatile atomic applies to the whole vector
+ ; - volatile/atomic/volatile atomic applies elementwise
+ ; Actions required:
+ ; - clarify LLVM semantics for volatile on vectors and align the NVPTX backend with those
+ ; The tests below show that the current implementation picks the semantics in an inconsistent way:
+ ; * volatile <2 x i8> lowers to "elementwise volatile"
+ ; * <4 x i8> lowers to "full vector volatile"
+ ; - provide support for vector atomics, e.g., by extending LLVM IR or via intrinsics
+ ; - update tests in load-store-sm70.ll as well.
+
+ ; TODO: make this operation consistent with the one for <4 x i8>
+ ; This operation lowers to an "element-wise volatile PTX operation".
+ ; CHECK: ld.volatile.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load volatile <2 x i8>, ptr %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.volatile.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <2 x i8> %h.add, ptr %b
+
+ ; TODO: make this operation consistent with the one for <2 x i8>
+ ; This operation lowers to a "full vector volatile PTX operation".
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load volatile <4 x i8>, ptr %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <4 x i8> %i.add, ptr %c
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load volatile <2 x i16>, ptr %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <2 x i16> %j.add, ptr %c
+
+ ; CHECK: ld.volatile.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load volatile <4 x i16>, ptr %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.volatile.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <4 x i16> %k.add, ptr %d
+
+ ; CHECK: ld.volatile.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load volatile <2 x i32>, ptr %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.volatile.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <2 x i32> %l.add, ptr %d
+
+ ; CHECK: ld.volatile.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load volatile <4 x i32>, ptr %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.volatile.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <4 x i32> %m.add, ptr %d
+
+ ; CHECK: ld.volatile.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load volatile <2 x i64>, ptr %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.volatile.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store volatile <2 x i64> %n.add, ptr %d
+
+ ; CHECK: ld.volatile.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load volatile <2 x float>, ptr %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.volatile.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <2 x float> %o.add, ptr %d
+
+ ; CHECK: ld.volatile.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load volatile <4 x float>, ptr %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.volatile.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <4 x float> %p.add, ptr %d
+
+ ; CHECK: ld.volatile.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load volatile <2 x double>, ptr %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.volatile.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store volatile <2 x double> %q.add, ptr %d
+
ret void
}
; CHECK-LABEL: generic_monotonic
define void @generic_monotonic(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr %a monotonic, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr %a monotonic, align 1
- ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr %b monotonic, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr %b monotonic, align 2
- ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr %c monotonic, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr %c monotonic, align 4
- ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr %d monotonic, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr %d monotonic, align 8
- ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr %e monotonic, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr %e monotonic, align 4
- ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr %e monotonic, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr %e monotonic, align 8
ret void
@@ -169,40 +334,52 @@ define void @generic_monotonic_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e)
; CHECK-LABEL: generic_unordered
define void @generic_unordered(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
- ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr %a unordered, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr %a unordered, align 1
- ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr %b unordered, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr %b unordered, align 2
- ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr %c unordered, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr %c unordered, align 4
- ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr %d unordered, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr %d unordered, align 8
- ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr %e unordered, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr %e unordered, align 4
- ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr %e unordered, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr %e unordered, align 8
ret void
@@ -289,6 +466,66 @@ define void @global_plain(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspac
; CHECK: st.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store double %f.add, ptr addrspace(1) %c
+ ; CHECK: ld.global.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load <2 x i8>, ptr addrspace(1) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.global.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <2 x i8> %h.add, ptr addrspace(1) %b
+
+ ; CHECK: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load <4 x i8>, ptr addrspace(1) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <4 x i8> %i.add, ptr addrspace(1) %c
+
+ ; CHECK: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load <2 x i16>, ptr addrspace(1) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <2 x i16> %j.add, ptr addrspace(1) %c
+
+ ; CHECK: ld.global.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load <4 x i16>, ptr addrspace(1) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.global.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <4 x i16> %k.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load <2 x i32>, ptr addrspace(1) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.global.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <2 x i32> %l.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load <4 x i32>, ptr addrspace(1) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.global.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <4 x i32> %m.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load <2 x i64>, ptr addrspace(1) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.global.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store <2 x i64> %n.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load <2 x float>, ptr addrspace(1) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.global.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <2 x float> %o.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load <4 x float>, ptr addrspace(1) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.global.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <4 x float> %p.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.global.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load <2 x double>, ptr addrspace(1) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.global.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store <2 x double> %q.add, ptr addrspace(1) %d
+
ret void
}
@@ -330,45 +567,117 @@ define void @global_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrs
; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store volatile double %f.add, ptr addrspace(1) %c
+ ; CHECK: ld.volatile.global.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load volatile <2 x i8>, ptr addrspace(1) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.volatile.global.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile<2 x i8> %h.add, ptr addrspace(1) %b
+
+ ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load volatile <4 x i8>, ptr addrspace(1) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile<4 x i8> %i.add, ptr addrspace(1) %c
+
+ ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load volatile <2 x i16>, ptr addrspace(1) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile<2 x i16> %j.add, ptr addrspace(1) %c
+
+ ; CHECK: ld.volatile.global.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load volatile <4 x i16>, ptr addrspace(1) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.volatile.global.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile<4 x i16> %k.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load volatile <2 x i32>, ptr addrspace(1) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.volatile.global.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile<2 x i32> %l.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load volatile <4 x i32>, ptr addrspace(1) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.volatile.global.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile<4 x i32> %m.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load volatile <2 x i64>, ptr addrspace(1) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.volatile.global.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store volatile<2 x i64> %n.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load volatile <2 x float>, ptr addrspace(1) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.volatile.global.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile<2 x float> %o.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load volatile <4 x float>, ptr addrspace(1) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.volatile.global.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile<4 x float> %p.add, ptr addrspace(1) %d
+
+ ; CHECK: ld.volatile.global.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load volatile <2 x double>, ptr addrspace(1) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.volatile.global.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store volatile<2 x double> %q.add, ptr addrspace(1) %d
+
ret void
}
; CHECK-LABEL: global_monotonic
define void @global_monotonic(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(1) %a monotonic, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr addrspace(1) %a monotonic, align 1
- ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr addrspace(1) %b monotonic, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr addrspace(1) %b monotonic, align 2
- ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr addrspace(1) %c monotonic, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr addrspace(1) %c monotonic, align 4
- ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr addrspace(1) %d monotonic, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr addrspace(1) %d monotonic, align 8
- ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(1) %e monotonic, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(1) %e monotonic, align 4
- ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr addrspace(1) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr addrspace(1) %e monotonic, align 8
ret void
@@ -376,40 +685,52 @@ define void @global_monotonic(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addr
; CHECK-LABEL: global_monotonic_volatile
define void @global_monotonic_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(1) %a monotonic, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic volatile i8 %a.add, ptr addrspace(1) %a monotonic, align 1
- ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic volatile i16, ptr addrspace(1) %b monotonic, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic volatile i16 %b.add, ptr addrspace(1) %b monotonic, align 2
- ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic volatile i32, ptr addrspace(1) %c monotonic, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic volatile i32 %c.add, ptr addrspace(1) %c monotonic, align 4
- ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic volatile i64, ptr addrspace(1) %d monotonic, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic volatile i64 %d.add, ptr addrspace(1) %d monotonic, align 8
- ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(1) %e monotonic, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(1) %e monotonic, align 4
- ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic volatile double, ptr addrspace(1) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic volatile double %f.add, ptr addrspace(1) %e monotonic, align 8
ret void
@@ -417,40 +738,52 @@ define void @global_monotonic_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b,
; CHECK-LABEL: global_unordered
define void @global_unordered(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(1) %a unordered, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr addrspace(1) %a unordered, align 1
- ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr addrspace(1) %b unordered, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr addrspace(1) %b unordered, align 2
- ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr addrspace(1) %c unordered, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr addrspace(1) %c unordered, align 4
- ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr addrspace(1) %d unordered, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr addrspace(1) %d unordered, align 8
- ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(1) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(1) %e unordered, align 4
- ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr addrspace(1) %e unordered, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr addrspace(1) %e unordered, align 8
ret void
@@ -458,40 +791,52 @@ define void @global_unordered(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addr
; CHECK-LABEL: global_unordered_volatile
define void @global_unordered_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(1) %a unordered, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic volatile i8 %a.add, ptr addrspace(1) %a unordered, align 1
- ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic volatile i16, ptr addrspace(1) %b unordered, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic volatile i16 %b.add, ptr addrspace(1) %b unordered, align 2
- ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic volatile i32, ptr addrspace(1) %c unordered, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic volatile i32 %c.add, ptr addrspace(1) %c unordered, align 4
- ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic volatile i64, ptr addrspace(1) %d unordered, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic volatile i64 %d.add, ptr addrspace(1) %d unordered, align 8
- ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(1) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(1) %e unordered, align 4
- ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic volatile double, ptr addrspace(1) %e unordered, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic volatile double %f.add, ptr addrspace(1) %e unordered, align 8
ret void
@@ -537,6 +882,66 @@ define void @shared_plain(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspac
; CHECK: st.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store double %f.add, ptr addrspace(3) %c
+ ; CHECK: ld.shared.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load <2 x i8>, ptr addrspace(3) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.shared.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <2 x i8> %h.add, ptr addrspace(3) %b
+
+ ; CHECK: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load <4 x i8>, ptr addrspace(3) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <4 x i8> %i.add, ptr addrspace(3) %c
+
+ ; CHECK: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load <2 x i16>, ptr addrspace(3) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <2 x i16> %j.add, ptr addrspace(3) %c
+
+ ; CHECK: ld.shared.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load <4 x i16>, ptr addrspace(3) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.shared.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <4 x i16> %k.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load <2 x i32>, ptr addrspace(3) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.shared.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <2 x i32> %l.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load <4 x i32>, ptr addrspace(3) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.shared.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <4 x i32> %m.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load <2 x i64>, ptr addrspace(3) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.shared.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store <2 x i64> %n.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load <2 x float>, ptr addrspace(3) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.shared.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <2 x float> %o.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load <4 x float>, ptr addrspace(3) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.shared.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <4 x float> %p.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.shared.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load <2 x double>, ptr addrspace(3) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.shared.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store <2 x double> %q.add, ptr addrspace(3) %d
+
ret void
}
@@ -578,45 +983,119 @@ define void @shared_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrs
; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store volatile double %f.add, ptr addrspace(3) %c
+ ; CHECK: ld.volatile.shared.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load volatile <2 x i8>, ptr addrspace(3) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.volatile.shared.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <2 x i8> %h.add, ptr addrspace(3) %b
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load volatile <4 x i8>, ptr addrspace(3) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <4 x i8> %i.add, ptr addrspace(3) %c
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load volatile <2 x i16>, ptr addrspace(3) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <2 x i16> %j.add, ptr addrspace(3) %c
+
+ ; CHECK: ld.volatile.shared.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load volatile <4 x i16>, ptr addrspace(3) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.volatile.shared.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <4 x i16> %k.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load volatile <2 x i32>, ptr addrspace(3) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.volatile.shared.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <2 x i32> %l.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load volatile <4 x i32>, ptr addrspace(3) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.volatile.shared.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <4 x i32> %m.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load volatile <2 x i64>, ptr addrspace(3) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.volatile.shared.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store volatile <2 x i64> %n.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load volatile <2 x float>, ptr addrspace(3) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.volatile.shared.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <2 x float> %o.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load volatile <4 x float>, ptr addrspace(3) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.volatile.shared.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <4 x float> %p.add, ptr addrspace(3) %d
+
+ ; CHECK: ld.volatile.shared.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load volatile <2 x double>, ptr addrspace(3) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.volatile.shared.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store volatile <2 x double> %q.add, ptr addrspace(3) %d
+
ret void
}
; CHECK-LABEL: shared_monotonic
define void @shared_monotonic(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; TODO: optimize .sys.shared to .cta.shared or .cluster.shared.
+
+ ; SM60: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(3) %a monotonic, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr addrspace(3) %a monotonic, align 1
- ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr addrspace(3) %b monotonic, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr addrspace(3) %b monotonic, align 2
- ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr addrspace(3) %c monotonic, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr addrspace(3) %c monotonic, align 4
- ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr addrspace(3) %d monotonic, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr addrspace(3) %d monotonic, align 8
- ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(3) %e monotonic, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(3) %e monotonic, align 4
- ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr addrspace(3) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr addrspace(3) %e monotonic, align 8
ret void
@@ -665,40 +1144,54 @@ define void @shared_monotonic_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b,
; CHECK-LABEL: shared_unordered
define void @shared_unordered(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; TODO: optimize .sys.shared to .cta.shared or .cluster.shared.
+
+ ; SM60: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(3) %a unordered, align 1
%a.add = add i8 %a.load, 1
- ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i8 %a.add, ptr addrspace(3) %a unordered, align 1
- ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%b.load = load atomic i16, ptr addrspace(3) %b unordered, align 2
%b.add = add i16 %b.load, 1
- ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM60: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
store atomic i16 %b.add, ptr addrspace(3) %b unordered, align 2
- ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
%c.load = load atomic i32, ptr addrspace(3) %c unordered, align 4
%c.add = add i32 %c.load, 1
- ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM60: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
store atomic i32 %c.add, ptr addrspace(3) %c unordered, align 4
- ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
%d.load = load atomic i64, ptr addrspace(3) %d unordered, align 8
%d.add = add i64 %d.load, 1
- ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM60: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
store atomic i64 %d.add, ptr addrspace(3) %d unordered, align 8
- ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(3) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
- ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM60: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(3) %e unordered, align 4
- ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM60: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ ; SM70: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr addrspace(3) %e unordered, align 8
%f.add = fadd double %f.load, 1.
- ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM60: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ ; SM70: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr addrspace(3) %e unordered, align 8
ret void
@@ -785,11 +1278,74 @@ define void @local_plain(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store double %f.add, ptr addrspace(5) %c
+ ; CHECK: ld.local.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load <2 x i8>, ptr addrspace(5) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.local.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <2 x i8> %h.add, ptr addrspace(5) %b
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load <4 x i8>, ptr addrspace(5) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <4 x i8> %i.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load <2 x i16>, ptr addrspace(5) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <2 x i16> %j.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load <4 x i16>, ptr addrspace(5) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.local.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <4 x i16> %k.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load <2 x i32>, ptr addrspace(5) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.local.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <2 x i32> %l.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load <4 x i32>, ptr addrspace(5) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.local.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <4 x i32> %m.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load <2 x i64>, ptr addrspace(5) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.local.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store <2 x i64> %n.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load <2 x float>, ptr addrspace(5) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.local.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <2 x float> %o.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load <4 x float>, ptr addrspace(5) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.local.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <4 x float> %p.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load <2 x double>, ptr addrspace(5) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.local.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store <2 x double> %q.add, ptr addrspace(5) %d
+
ret void
}
; CHECK-LABEL: local_volatile
define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using volatile operations.
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load volatile i8, ptr addrspace(5) %a
%a.add = add i8 %a.load, 1
@@ -826,11 +1382,74 @@ define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrsp
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store volatile double %f.add, ptr addrspace(5) %c
+ ; CHECK: ld.local.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load volatile <2 x i8>, ptr addrspace(5) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.local.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <2 x i8> %h.add, ptr addrspace(5) %b
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load volatile <4 x i8>, ptr addrspace(5) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <4 x i8> %i.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load volatile <2 x i16>, ptr addrspace(5) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile <2 x i16> %j.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load volatile <4 x i16>, ptr addrspace(5) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.local.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store volatile <4 x i16> %k.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load volatile <2 x i32>, ptr addrspace(5) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.local.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <2 x i32> %l.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load volatile <4 x i32>, ptr addrspace(5) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.local.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store volatile <4 x i32> %m.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load volatile <2 x i64>, ptr addrspace(5) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.local.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store volatile <2 x i64> %n.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load volatile <2 x float>, ptr addrspace(5) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.local.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <2 x float> %o.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load volatile <4 x float>, ptr addrspace(5) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.local.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store volatile <4 x float> %p.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load volatile <2 x double>, ptr addrspace(5) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.local.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store volatile <2 x double> %q.add, ptr addrspace(5) %d
+
ret void
}
; CHECK-LABEL: local_monotonic
define void @local_monotonic(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using PTX atomic operations.
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(5) %a monotonic, align 1
%a.add = add i8 %a.load, 1
@@ -872,6 +1491,9 @@ define void @local_monotonic(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrs
; CHECK-LABEL: local_monotonic_volatile
define void @local_monotonic_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by generating atomic or volatile operations
+
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(5) %a monotonic, align 1
%a.add = add i8 %a.load, 1
@@ -992,3 +1614,6 @@ define void @local_unordered_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b,
ret void
}
+
+; TODO: add plain,atomic,volatile,atomic volatile tests
+; for .const and .param statespaces \ No newline at end of file
diff --git a/llvm/test/CodeGen/PowerPC/O0-pipeline.ll b/llvm/test/CodeGen/PowerPC/O0-pipeline.ll
index f1c9da0..70b421f 100644
--- a/llvm/test/CodeGen/PowerPC/O0-pipeline.ll
+++ b/llvm/test/CodeGen/PowerPC/O0-pipeline.ll
@@ -25,7 +25,6 @@
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/PowerPC/O3-pipeline.ll b/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
index be0fbf3..f4f4927 100644
--- a/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
+++ b/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
@@ -62,7 +62,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/PowerPC/aix-base-pointer.ll b/llvm/test/CodeGen/PowerPC/aix-base-pointer.ll
index ab222d7..5e66e5e 100644
--- a/llvm/test/CodeGen/PowerPC/aix-base-pointer.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-base-pointer.ll
@@ -6,6 +6,7 @@
; Use an overaligned buffer to force base-pointer usage. Test verifies:
; - base pointer register (r30) is saved/defined/restored.
+; - frame pointer register (r31) is saved/defined/restored.
; - stack frame is allocated with correct alignment.
; - Address of %AlignedBuffer is calculated based off offset from the stack
; pointer.
@@ -25,7 +26,9 @@ declare void @callee(ptr)
; 32BIT: subfic 0, 0, -224
; 32BIT: stwux 1, 1, 0
; 32BIT: addi 3, 1, 64
+; 32BIT: stw 31, -12(30)
; 32BIT: bl .callee
+; 32BIT: lwz 31, -12(30)
; 32BIT: mr 1, 30
; 32BIT: lwz 30, -16(1)
@@ -36,6 +39,8 @@ declare void @callee(ptr)
; 64BIT: subfic 0, 0, -288
; 64BIT: stdux 1, 1, 0
; 64BIT: addi 3, 1, 128
+; 64BIT: std 31, -16(30)
; 64BIT: bl .callee
+; 64BIT: ld 31, -16(30)
; 64BIT: mr 1, 30
; 64BIT: ld 30, -24(1)
diff --git a/llvm/test/CodeGen/PowerPC/builtins-bcd-assist.ll b/llvm/test/CodeGen/PowerPC/builtins-bcd-assist.ll
new file mode 100644
index 0000000..cc5d6be
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/builtins-bcd-assist.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux \
+; RUN: --ppc-asm-full-reg-names -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
+; RUN: --ppc-asm-full-reg-names -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc-unknown-aix \
+; RUN: --ppc-asm-full-reg-names -mcpu=pwr7 < %s | FileCheck %s --check-prefix=CHECK-AIX32
+
+define dso_local i64 @cdtbcd_test(i64 noundef %ll) {
+; CHECK-LABEL: cdtbcd_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cdtbcd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: cdtbcd_test:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: li r3, 0
+; CHECK-AIX32-NEXT: cdtbcd r4, r4
+; CHECK-AIX32-NEXT: blr
+entry:
+ %conv = trunc i64 %ll to i32
+ %0 = tail call i32 @llvm.ppc.cdtbcd(i32 %conv)
+ %conv1 = zext i32 %0 to i64
+ ret i64 %conv1
+}
+
+define dso_local zeroext i32 @cdtbcd_test_ui(i32 noundef zeroext %ui) {
+; CHECK-LABEL: cdtbcd_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cdtbcd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: cdtbcd_test_ui:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: cdtbcd r3, r3
+; CHECK-AIX32-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.ppc.cdtbcd(i32 %ui)
+ ret i32 %0
+}
+
+define dso_local i64 @cbcdtd_test(i64 noundef %ll) {
+; CHECK-LABEL: cbcdtd_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cbcdtd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: cbcdtd_test:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: li r3, 0
+; CHECK-AIX32-NEXT: cbcdtd r4, r4
+; CHECK-AIX32-NEXT: blr
+entry:
+ %conv = trunc i64 %ll to i32
+  %0 = tail call i32 @llvm.ppc.cbcdtd(i32 %conv)
+ %conv1 = zext i32 %0 to i64
+ ret i64 %conv1
+}
+
+define dso_local zeroext i32 @cbcdtd_test_ui(i32 noundef zeroext %ui) {
+; CHECK-LABEL: cbcdtd_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cbcdtd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: cbcdtd_test_ui:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: cbcdtd r3, r3
+; CHECK-AIX32-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.ppc.cbcdtd(i32 %ui)
+ ret i32 %0
+}
+
+define dso_local i64 @addg6s_test(i64 noundef %ll, i64 noundef %ll2) {
+; CHECK-LABEL: addg6s_test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT: addg6s r3, r3, r4
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: addg6s_test:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: li r3, 0
+; CHECK-AIX32-NEXT: addg6s r4, r4, r6
+; CHECK-AIX32-NEXT: blr
+entry:
+ %conv = trunc i64 %ll to i32
+ %conv1 = trunc i64 %ll2 to i32
+ %0 = tail call i32 @llvm.ppc.addg6s(i32 %conv, i32 %conv1)
+ %conv2 = zext i32 %0 to i64
+ ret i64 %conv2
+}
+
+define dso_local zeroext i32 @addg6s_test_ui(i32 noundef zeroext %ui, i32 noundef zeroext %ui2) {
+; CHECK-LABEL: addg6s_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addg6s r3, r3, r4
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+; CHECK-AIX32-LABEL: addg6s_test_ui:
+; CHECK-AIX32: # %bb.0: # %entry
+; CHECK-AIX32-NEXT: addg6s r3, r3, r4
+; CHECK-AIX32-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.ppc.addg6s(i32 %ui, i32 %ui2)
+ ret i32 %0
+}
+
+declare i32 @llvm.ppc.cdtbcd(i32)
+declare i32 @llvm.ppc.cbcdtd(i32)
+declare i32 @llvm.ppc.addg6s(i32, i32)
diff --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.ll
new file mode 100644
index 0000000..d188f60
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-bcd-assist.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux \
+; RUN: --ppc-asm-full-reg-names -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
+; RUN: --ppc-asm-full-reg-names -mcpu=pwr7 < %s | FileCheck %s
+
+define i64 @cdtbcd_test(i64 noundef %ll) {
+; CHECK-LABEL: cdtbcd_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cdtbcd r3, r3
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.ppc.cdtbcdd(i64 %ll)
+ ret i64 %0
+}
+
+define zeroext i32 @cdtbcd_test_ui(i32 noundef zeroext %ui) {
+; CHECK-LABEL: cdtbcd_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cdtbcd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+entry:
+ %conv = zext i32 %ui to i64
+ %0 = tail call i64 @llvm.ppc.cdtbcdd(i64 %conv)
+ %conv1 = trunc i64 %0 to i32
+ ret i32 %conv1
+}
+
+define i64 @cbcdtd_test(i64 noundef %ll) {
+; CHECK-LABEL: cbcdtd_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cbcdtd r3, r3
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.ppc.cbcdtdd(i64 %ll)
+ ret i64 %0
+}
+
+define zeroext i32 @cbcdtd_test_ui(i32 noundef zeroext %ui) {
+; CHECK-LABEL: cbcdtd_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cbcdtd r3, r3
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+entry:
+ %conv = zext i32 %ui to i64
+ %0 = tail call i64 @llvm.ppc.cbcdtdd(i64 %conv)
+ %conv1 = trunc i64 %0 to i32
+ ret i32 %conv1
+}
+
+define i64 @addg6s_test(i64 noundef %ll, i64 noundef %ll2) {
+; CHECK-LABEL: addg6s_test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addg6s r3, r3, r4
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.ppc.addg6sd(i64 %ll, i64 %ll2)
+ ret i64 %0
+}
+
+define zeroext i32 @addg6s_test_ui(i32 noundef zeroext %ui, i32 noundef zeroext %ui2) {
+; CHECK-LABEL: addg6s_test_ui:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addg6s r3, r3, r4
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+entry:
+ %conv = zext i32 %ui to i64
+ %conv1 = zext i32 %ui2 to i64
+ %0 = tail call i64 @llvm.ppc.addg6sd(i64 %conv, i64 %conv1)
+ %conv2 = trunc i64 %0 to i32
+ ret i32 %conv2
+}
+
+declare i64 @llvm.ppc.cdtbcdd(i64)
+declare i64 @llvm.ppc.cbcdtdd(i64)
+declare i64 @llvm.ppc.addg6sd(i64, i64)
diff --git a/llvm/test/CodeGen/PowerPC/common-chain.ll b/llvm/test/CodeGen/PowerPC/common-chain.ll
index ccf0e45..b71a360 100644
--- a/llvm/test/CodeGen/PowerPC/common-chain.ll
+++ b/llvm/test/CodeGen/PowerPC/common-chain.ll
@@ -743,219 +743,214 @@ define signext i32 @spill_reduce_succ(ptr %input1, ptr %input2, ptr %output, i64
; CHECK-NEXT: std r9, -184(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r8, -176(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r7, -168(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r4, -160(r1) # 8-byte Folded Spill
+; CHECK-NEXT: std r3, -160(r1) # 8-byte Folded Spill
; CHECK-NEXT: ble cr0, .LBB7_7
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: sldi r4, r6, 2
-; CHECK-NEXT: li r6, 1
-; CHECK-NEXT: mr r0, r10
-; CHECK-NEXT: std r10, -192(r1) # 8-byte Folded Spill
-; CHECK-NEXT: cmpdi r4, 1
-; CHECK-NEXT: iselgt r4, r4, r6
-; CHECK-NEXT: addi r7, r4, -1
-; CHECK-NEXT: clrldi r6, r4, 63
-; CHECK-NEXT: cmpldi r7, 3
+; CHECK-NEXT: sldi r6, r6, 2
+; CHECK-NEXT: li r7, 1
+; CHECK-NEXT: mr r30, r10
+; CHECK-NEXT: cmpdi r6, 1
+; CHECK-NEXT: iselgt r7, r6, r7
+; CHECK-NEXT: addi r8, r7, -1
+; CHECK-NEXT: clrldi r6, r7, 63
+; CHECK-NEXT: cmpldi r8, 3
; CHECK-NEXT: blt cr0, .LBB7_4
; CHECK-NEXT: # %bb.2: # %for.body.preheader.new
-; CHECK-NEXT: ld r0, -192(r1) # 8-byte Folded Reload
-; CHECK-NEXT: ld r30, -184(r1) # 8-byte Folded Reload
-; CHECK-NEXT: ld r8, -176(r1) # 8-byte Folded Reload
-; CHECK-NEXT: rldicl r7, r4, 62, 2
-; CHECK-NEXT: ld r9, -168(r1) # 8-byte Folded Reload
-; CHECK-NEXT: add r11, r0, r30
-; CHECK-NEXT: add r4, r0, r0
-; CHECK-NEXT: mulli r23, r0, 24
-; CHECK-NEXT: add r14, r0, r8
-; CHECK-NEXT: sldi r12, r0, 5
-; CHECK-NEXT: add r31, r0, r9
-; CHECK-NEXT: sldi r9, r9, 3
-; CHECK-NEXT: sldi r18, r0, 4
-; CHECK-NEXT: sldi r8, r8, 3
-; CHECK-NEXT: add r10, r4, r4
-; CHECK-NEXT: sldi r4, r30, 3
-; CHECK-NEXT: sldi r11, r11, 3
-; CHECK-NEXT: add r26, r12, r9
-; CHECK-NEXT: add r16, r18, r9
-; CHECK-NEXT: add r29, r12, r8
-; CHECK-NEXT: add r19, r18, r8
-; CHECK-NEXT: add r30, r12, r4
-; CHECK-NEXT: mr r20, r4
-; CHECK-NEXT: std r4, -200(r1) # 8-byte Folded Spill
-; CHECK-NEXT: ld r4, -160(r1) # 8-byte Folded Reload
-; CHECK-NEXT: add r15, r5, r11
-; CHECK-NEXT: sldi r11, r14, 3
-; CHECK-NEXT: add r29, r5, r29
-; CHECK-NEXT: add r28, r3, r26
-; CHECK-NEXT: add r19, r5, r19
-; CHECK-NEXT: add r21, r23, r9
-; CHECK-NEXT: add r24, r23, r8
-; CHECK-NEXT: add r14, r5, r11
-; CHECK-NEXT: sldi r11, r31, 3
-; CHECK-NEXT: add r25, r23, r20
-; CHECK-NEXT: add r20, r18, r20
-; CHECK-NEXT: add r30, r5, r30
-; CHECK-NEXT: add r18, r3, r16
-; CHECK-NEXT: add r24, r5, r24
-; CHECK-NEXT: add r23, r3, r21
-; CHECK-NEXT: add r27, r4, r26
-; CHECK-NEXT: add r22, r4, r21
-; CHECK-NEXT: add r17, r4, r16
-; CHECK-NEXT: add r2, r4, r11
-; CHECK-NEXT: rldicl r4, r7, 2, 1
-; CHECK-NEXT: sub r7, r8, r9
-; CHECK-NEXT: ld r8, -200(r1) # 8-byte Folded Reload
+; CHECK-NEXT: ld r14, -168(r1) # 8-byte Folded Reload
+; CHECK-NEXT: mulli r24, r30, 24
+; CHECK-NEXT: ld r16, -184(r1) # 8-byte Folded Reload
+; CHECK-NEXT: ld r15, -176(r1) # 8-byte Folded Reload
+; CHECK-NEXT: ld r3, -160(r1) # 8-byte Folded Reload
+; CHECK-NEXT: rldicl r0, r7, 62, 2
+; CHECK-NEXT: sldi r11, r30, 5
+; CHECK-NEXT: sldi r19, r30, 4
+; CHECK-NEXT: sldi r7, r14, 3
+; CHECK-NEXT: add r14, r30, r14
+; CHECK-NEXT: sldi r10, r16, 3
+; CHECK-NEXT: sldi r12, r15, 3
+; CHECK-NEXT: add r16, r30, r16
+; CHECK-NEXT: add r15, r30, r15
+; CHECK-NEXT: add r27, r11, r7
+; CHECK-NEXT: add r22, r24, r7
+; CHECK-NEXT: add r17, r19, r7
+; CHECK-NEXT: sldi r2, r14, 3
+; CHECK-NEXT: add r26, r24, r10
+; CHECK-NEXT: add r25, r24, r12
+; CHECK-NEXT: add r21, r19, r10
+; CHECK-NEXT: add r20, r19, r12
+; CHECK-NEXT: add r8, r11, r10
+; CHECK-NEXT: sldi r16, r16, 3
+; CHECK-NEXT: add r29, r5, r27
+; CHECK-NEXT: add r28, r4, r27
+; CHECK-NEXT: add r27, r3, r27
+; CHECK-NEXT: add r24, r5, r22
+; CHECK-NEXT: add r23, r4, r22
+; CHECK-NEXT: add r22, r3, r22
+; CHECK-NEXT: add r19, r5, r17
+; CHECK-NEXT: add r18, r4, r17
+; CHECK-NEXT: add r17, r3, r17
+; CHECK-NEXT: add r14, r5, r2
+; CHECK-NEXT: add r31, r4, r2
+; CHECK-NEXT: add r2, r3, r2
+; CHECK-NEXT: add r9, r5, r8
+; CHECK-NEXT: add r8, r11, r12
; CHECK-NEXT: add r26, r5, r26
; CHECK-NEXT: add r25, r5, r25
; CHECK-NEXT: add r21, r5, r21
; CHECK-NEXT: add r20, r5, r20
; CHECK-NEXT: add r16, r5, r16
-; CHECK-NEXT: add r31, r5, r11
-; CHECK-NEXT: add r11, r3, r11
-; CHECK-NEXT: addi r4, r4, -4
-; CHECK-NEXT: rldicl r4, r4, 62, 2
-; CHECK-NEXT: sub r8, r8, r9
-; CHECK-NEXT: li r9, 0
-; CHECK-NEXT: addi r4, r4, 1
-; CHECK-NEXT: mtctr r4
+; CHECK-NEXT: add r8, r5, r8
+; CHECK-NEXT: rldicl r3, r0, 2, 1
+; CHECK-NEXT: addi r3, r3, -4
+; CHECK-NEXT: sub r0, r12, r7
+; CHECK-NEXT: sub r12, r10, r7
+; CHECK-NEXT: li r7, 0
+; CHECK-NEXT: mr r10, r30
+; CHECK-NEXT: sldi r15, r15, 3
+; CHECK-NEXT: add r15, r5, r15
+; CHECK-NEXT: rldicl r3, r3, 62, 2
+; CHECK-NEXT: addi r3, r3, 1
+; CHECK-NEXT: mtctr r3
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB7_3: # %for.body
; CHECK-NEXT: #
-; CHECK-NEXT: lfd f0, 0(r11)
-; CHECK-NEXT: lfd f1, 0(r2)
-; CHECK-NEXT: add r0, r0, r10
-; CHECK-NEXT: xsmuldp f0, f0, f1
+; CHECK-NEXT: lfd f0, 0(r2)
; CHECK-NEXT: lfd f1, 0(r31)
+; CHECK-NEXT: add r3, r10, r30
+; CHECK-NEXT: add r3, r3, r30
+; CHECK-NEXT: xsmuldp f0, f0, f1
+; CHECK-NEXT: lfd f1, 0(r14)
+; CHECK-NEXT: add r3, r3, r30
+; CHECK-NEXT: add r10, r3, r30
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfd f0, 0(r31)
-; CHECK-NEXT: add r31, r31, r12
-; CHECK-NEXT: lfdx f0, r11, r7
-; CHECK-NEXT: lfdx f1, r2, r7
+; CHECK-NEXT: stfd f0, 0(r14)
+; CHECK-NEXT: add r14, r14, r11
+; CHECK-NEXT: lfdx f0, r2, r0
+; CHECK-NEXT: lfdx f1, r31, r0
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r14, r9
+; CHECK-NEXT: lfdx f1, r15, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r14, r9
-; CHECK-NEXT: lfdx f0, r11, r8
-; CHECK-NEXT: lfdx f1, r2, r8
-; CHECK-NEXT: add r11, r11, r12
-; CHECK-NEXT: add r2, r2, r12
+; CHECK-NEXT: stfdx f0, r15, r7
+; CHECK-NEXT: lfdx f0, r2, r12
+; CHECK-NEXT: lfdx f1, r31, r12
+; CHECK-NEXT: add r2, r2, r11
+; CHECK-NEXT: add r31, r31, r11
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r15, r9
+; CHECK-NEXT: lfdx f1, r16, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r15, r9
-; CHECK-NEXT: lfd f0, 0(r18)
-; CHECK-NEXT: lfd f1, 0(r17)
+; CHECK-NEXT: stfdx f0, r16, r7
+; CHECK-NEXT: lfd f0, 0(r17)
+; CHECK-NEXT: lfd f1, 0(r18)
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r16, r9
+; CHECK-NEXT: lfdx f1, r19, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r16, r9
-; CHECK-NEXT: lfdx f0, r18, r7
-; CHECK-NEXT: lfdx f1, r17, r7
+; CHECK-NEXT: stfdx f0, r19, r7
+; CHECK-NEXT: lfdx f0, r17, r0
+; CHECK-NEXT: lfdx f1, r18, r0
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r19, r9
+; CHECK-NEXT: lfdx f1, r20, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r19, r9
-; CHECK-NEXT: lfdx f0, r18, r8
-; CHECK-NEXT: lfdx f1, r17, r8
-; CHECK-NEXT: add r18, r18, r12
-; CHECK-NEXT: add r17, r17, r12
+; CHECK-NEXT: stfdx f0, r20, r7
+; CHECK-NEXT: lfdx f0, r17, r12
+; CHECK-NEXT: lfdx f1, r18, r12
+; CHECK-NEXT: add r17, r17, r11
+; CHECK-NEXT: add r18, r18, r11
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r20, r9
+; CHECK-NEXT: lfdx f1, r21, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r20, r9
-; CHECK-NEXT: lfd f0, 0(r23)
-; CHECK-NEXT: lfd f1, 0(r22)
+; CHECK-NEXT: stfdx f0, r21, r7
+; CHECK-NEXT: lfd f0, 0(r22)
+; CHECK-NEXT: lfd f1, 0(r23)
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r21, r9
+; CHECK-NEXT: lfdx f1, r24, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r21, r9
-; CHECK-NEXT: lfdx f0, r23, r7
-; CHECK-NEXT: lfdx f1, r22, r7
+; CHECK-NEXT: stfdx f0, r24, r7
+; CHECK-NEXT: lfdx f0, r22, r0
+; CHECK-NEXT: lfdx f1, r23, r0
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r24, r9
+; CHECK-NEXT: lfdx f1, r25, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r24, r9
-; CHECK-NEXT: lfdx f0, r23, r8
-; CHECK-NEXT: lfdx f1, r22, r8
-; CHECK-NEXT: add r23, r23, r12
-; CHECK-NEXT: add r22, r22, r12
+; CHECK-NEXT: stfdx f0, r25, r7
+; CHECK-NEXT: lfdx f0, r22, r12
+; CHECK-NEXT: lfdx f1, r23, r12
+; CHECK-NEXT: add r22, r22, r11
+; CHECK-NEXT: add r23, r23, r11
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r25, r9
+; CHECK-NEXT: lfdx f1, r26, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r25, r9
-; CHECK-NEXT: lfd f0, 0(r28)
-; CHECK-NEXT: lfd f1, 0(r27)
+; CHECK-NEXT: stfdx f0, r26, r7
+; CHECK-NEXT: lfd f0, 0(r27)
+; CHECK-NEXT: lfd f1, 0(r28)
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r26, r9
+; CHECK-NEXT: lfdx f1, r29, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r26, r9
-; CHECK-NEXT: lfdx f0, r28, r7
-; CHECK-NEXT: lfdx f1, r27, r7
+; CHECK-NEXT: stfdx f0, r29, r7
+; CHECK-NEXT: lfdx f0, r27, r0
+; CHECK-NEXT: lfdx f1, r28, r0
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r29, r9
+; CHECK-NEXT: lfdx f1, r8, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r29, r9
-; CHECK-NEXT: lfdx f0, r28, r8
-; CHECK-NEXT: lfdx f1, r27, r8
-; CHECK-NEXT: add r28, r28, r12
-; CHECK-NEXT: add r27, r27, r12
+; CHECK-NEXT: stfdx f0, r8, r7
+; CHECK-NEXT: lfdx f0, r27, r12
+; CHECK-NEXT: lfdx f1, r28, r12
+; CHECK-NEXT: add r27, r27, r11
+; CHECK-NEXT: add r28, r28, r11
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r30, r9
+; CHECK-NEXT: lfdx f1, r9, r7
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r30, r9
-; CHECK-NEXT: add r9, r9, r12
+; CHECK-NEXT: stfdx f0, r9, r7
+; CHECK-NEXT: add r7, r7, r11
; CHECK-NEXT: bdnz .LBB7_3
; CHECK-NEXT: .LBB7_4: # %for.cond.cleanup.loopexit.unr-lcssa
-; CHECK-NEXT: ld r7, -192(r1) # 8-byte Folded Reload
; CHECK-NEXT: cmpldi r6, 0
; CHECK-NEXT: beq cr0, .LBB7_7
; CHECK-NEXT: # %bb.5: # %for.body.epil.preheader
-; CHECK-NEXT: ld r4, -184(r1) # 8-byte Folded Reload
-; CHECK-NEXT: ld r29, -160(r1) # 8-byte Folded Reload
-; CHECK-NEXT: mr r30, r3
-; CHECK-NEXT: sldi r7, r7, 3
-; CHECK-NEXT: add r4, r0, r4
-; CHECK-NEXT: sldi r4, r4, 3
-; CHECK-NEXT: add r3, r5, r4
-; CHECK-NEXT: add r8, r29, r4
-; CHECK-NEXT: add r9, r30, r4
-; CHECK-NEXT: ld r4, -176(r1) # 8-byte Folded Reload
-; CHECK-NEXT: add r4, r0, r4
-; CHECK-NEXT: sldi r4, r4, 3
-; CHECK-NEXT: add r10, r5, r4
-; CHECK-NEXT: add r11, r29, r4
-; CHECK-NEXT: add r12, r30, r4
-; CHECK-NEXT: ld r4, -168(r1) # 8-byte Folded Reload
-; CHECK-NEXT: add r4, r0, r4
-; CHECK-NEXT: sldi r0, r4, 3
-; CHECK-NEXT: add r5, r5, r0
-; CHECK-NEXT: add r4, r29, r0
-; CHECK-NEXT: add r30, r30, r0
-; CHECK-NEXT: li r0, 0
+; CHECK-NEXT: ld r3, -184(r1) # 8-byte Folded Reload
+; CHECK-NEXT: ld r0, -160(r1) # 8-byte Folded Reload
+; CHECK-NEXT: sldi r8, r30, 3
+; CHECK-NEXT: add r3, r10, r3
+; CHECK-NEXT: sldi r3, r3, 3
+; CHECK-NEXT: add r7, r5, r3
+; CHECK-NEXT: add r9, r4, r3
+; CHECK-NEXT: add r11, r0, r3
+; CHECK-NEXT: ld r3, -176(r1) # 8-byte Folded Reload
+; CHECK-NEXT: add r3, r10, r3
+; CHECK-NEXT: sldi r3, r3, 3
+; CHECK-NEXT: add r12, r5, r3
+; CHECK-NEXT: add r30, r4, r3
+; CHECK-NEXT: add r29, r0, r3
+; CHECK-NEXT: ld r3, -168(r1) # 8-byte Folded Reload
+; CHECK-NEXT: add r3, r10, r3
+; CHECK-NEXT: li r10, 0
+; CHECK-NEXT: sldi r3, r3, 3
+; CHECK-NEXT: add r5, r5, r3
+; CHECK-NEXT: add r4, r4, r3
+; CHECK-NEXT: add r3, r0, r3
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB7_6: # %for.body.epil
; CHECK-NEXT: #
-; CHECK-NEXT: lfdx f0, r30, r0
-; CHECK-NEXT: lfdx f1, r4, r0
+; CHECK-NEXT: lfdx f0, r3, r10
+; CHECK-NEXT: lfdx f1, r4, r10
; CHECK-NEXT: addi r6, r6, -1
; CHECK-NEXT: cmpldi r6, 0
; CHECK-NEXT: xsmuldp f0, f0, f1
; CHECK-NEXT: lfd f1, 0(r5)
; CHECK-NEXT: xsadddp f0, f1, f0
; CHECK-NEXT: stfd f0, 0(r5)
-; CHECK-NEXT: add r5, r5, r7
-; CHECK-NEXT: lfdx f0, r12, r0
-; CHECK-NEXT: lfdx f1, r11, r0
+; CHECK-NEXT: add r5, r5, r8
+; CHECK-NEXT: lfdx f0, r29, r10
+; CHECK-NEXT: lfdx f1, r30, r10
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r10, r0
+; CHECK-NEXT: lfdx f1, r12, r10
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r10, r0
-; CHECK-NEXT: lfdx f0, r9, r0
-; CHECK-NEXT: lfdx f1, r8, r0
+; CHECK-NEXT: stfdx f0, r12, r10
+; CHECK-NEXT: lfdx f0, r11, r10
+; CHECK-NEXT: lfdx f1, r9, r10
; CHECK-NEXT: xsmuldp f0, f0, f1
-; CHECK-NEXT: lfdx f1, r3, r0
+; CHECK-NEXT: lfdx f1, r7, r10
; CHECK-NEXT: xsadddp f0, f1, f0
-; CHECK-NEXT: stfdx f0, r3, r0
-; CHECK-NEXT: add r0, r0, r7
+; CHECK-NEXT: stfdx f0, r7, r10
+; CHECK-NEXT: add r10, r10, r8
; CHECK-NEXT: bne cr0, .LBB7_6
; CHECK-NEXT: .LBB7_7: # %for.cond.cleanup
; CHECK-NEXT: ld r2, -152(r1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index fd2ba49..9be03d5 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -26,7 +26,6 @@
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
@@ -69,6 +68,7 @@
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: Stack Frame Layout Analysis
+; CHECK-NEXT: RISC-V Indirect Branch Tracking
; CHECK-NEXT: RISC-V pseudo instruction expansion pass
; CHECK-NEXT: RISC-V atomic pseudo instruction expansion pass
; CHECK-NEXT: Unpack machine instruction bundles
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index d6d0cca6..7bad290 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -62,7 +62,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
@@ -195,6 +194,7 @@
; CHECK-NEXT: Stack Frame Layout Analysis
; CHECK-NEXT: RISC-V Zcmp move merging pass
; CHECK-NEXT: RISC-V Zcmp Push/Pop optimization pass
+; CHECK-NEXT: RISC-V Indirect Branch Tracking
; CHECK-NEXT: RISC-V pseudo instruction expansion pass
; CHECK-NEXT: RISC-V atomic pseudo instruction expansion pass
; CHECK-NEXT: Unpack machine instruction bundles
diff --git a/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll b/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
index 9d57ca7..0e87d8d 100644
--- a/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
+++ b/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
@@ -8,6 +8,7 @@
define void @above_threshold(i32 signext %in, ptr %out) nounwind {
; CHECK-LABEL: above_threshold:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lpad 0
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: li a2, 5
; CHECK-NEXT: bltu a2, a0, .LBB0_9
diff --git a/llvm/test/CodeGen/RISCV/lpad.ll b/llvm/test/CodeGen/RISCV/lpad.ll
new file mode 100644
index 0000000..de82a9e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/lpad.ll
@@ -0,0 +1,101 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-zicfilp < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple riscv64 -mattr=+experimental-zicfilp < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+; Check indirectbr.
+@__const.indirctbr.addr = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@indirctbr, %labelA), ptr blockaddress(@indirctbr, %labelB)], align 8
+define void @indirctbr(i32 %i, ptr %p) {
+; RV32-LABEL: indirctbr:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: lpad 0
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
+; RV32-NEXT: addi a2, a2, %lo(.L__const.indirctbr.addr)
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: jr a0
+; RV32-NEXT: .p2align 2
+; RV32-NEXT: .Ltmp0: # Block address taken
+; RV32-NEXT: .LBB0_1: # %labelA
+; RV32-NEXT: lpad 0
+; RV32-NEXT: li a0, 1
+; RV32-NEXT: sw a0, 0(a1)
+; RV32-NEXT: .p2align 2
+; RV32-NEXT: .Ltmp1: # Block address taken
+; RV32-NEXT: .LBB0_2: # %labelB
+; RV32-NEXT: lpad 0
+; RV32-NEXT: li a0, 2
+; RV32-NEXT: sw a0, 0(a1)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: indirctbr:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: lpad 0
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
+; RV64-NEXT: addi a2, a2, %lo(.L__const.indirctbr.addr)
+; RV64-NEXT: add a0, a2, a0
+; RV64-NEXT: ld a0, 0(a0)
+; RV64-NEXT: jr a0
+; RV64-NEXT: .p2align 2
+; RV64-NEXT: .Ltmp0: # Block address taken
+; RV64-NEXT: .LBB0_1: # %labelA
+; RV64-NEXT: lpad 0
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: sw a0, 0(a1)
+; RV64-NEXT: .p2align 2
+; RV64-NEXT: .Ltmp1: # Block address taken
+; RV64-NEXT: .LBB0_2: # %labelB
+; RV64-NEXT: lpad 0
+; RV64-NEXT: li a0, 2
+; RV64-NEXT: sw a0, 0(a1)
+; RV64-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds [2 x ptr], ptr @__const.indirctbr.addr, i64 0, i32 %i
+ %0 = load ptr, ptr %arrayidx
+ indirectbr ptr %0, [label %labelA, label %labelB]
+
+labelA: ; preds = %entry
+ store volatile i32 1, ptr %p
+ br label %labelB
+
+labelB: ; preds = %labelA, %entry
+ store volatile i32 2, ptr %p
+ ret void
+}
+
+; Check external linkage function.
+define void @external() {
+; CHECK-LABEL: external:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpad 0
+; CHECK-NEXT: ret
+ ret void
+}
+
+; Check internal linkage function.
+define internal void @internal() {
+; CHECK-LABEL: internal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ ret void
+}
+
+; Check internal linkage function with taken address.
+@foo = constant ptr @internal2
+define internal void @internal2() {
+; CHECK-LABEL: internal2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lpad 0
+; CHECK-NEXT: ret
+ ret void
+}
+
+; Check interrupt function does not need landing pad.
+define void @interrupt() "interrupt"="user" {
+; CHECK-LABEL: interrupt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mret
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
index 85663f0..b763e11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \
; RUN: -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \
; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
@@ -817,3 +817,136 @@ entry:
ret <vscale x 8 x double> %a
}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vcompress_vm_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vcompress_vm_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vcompress_vm_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vcompress_vm_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v10, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vcompress_vm_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v12, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vcompress_vm_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vcompress.vm v8, v16, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
new file mode 100644
index 0000000..c8a0489
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
+
+define <vscale x 1 x i64> @all_ones(<vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl) {
+; CHECK-LABEL: all_ones:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 true), <vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @all_zeroes(<vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl) {
+; CHECK-LABEL: all_zeroes:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 false), <vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
index d11e172..5d700e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
; RUN: -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
@@ -4820,3 +4820,785 @@ entry:
ret <vscale x 8 x double> %a
}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i16>,
+ iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> undef,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i16>,
+ iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat> undef,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i16>,
+ iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v10, v8, v9
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> undef,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i16>,
+ iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vrgather.vv v12, v8, v10
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> undef,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i16>,
+ iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vrgather.vv v16, v8, v12
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> undef,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i16>,
+ iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vrgather.vv v24, v8, v16
+; CHECK-NEXT: vmv.v.v v8, v24
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> undef,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vrgather.vx v9, v8, a0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> undef,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vrgather.vx v9, v8, a0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat> undef,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vx v9, v8, a0
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> undef,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vrgather.vx v10, v8, a0
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> undef,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vrgather.vx v12, v8, a0
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> undef,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen,
+ iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vrgather.vx v16, v8, a0
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> undef,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vrgather.vi v9, v8, 9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> undef,
+ <vscale x 1 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vrgather.vi v9, v8, 9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat> undef,
+ <vscale x 2 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 9,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vi v9, v8, 9
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> undef,
+ <vscale x 4 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 9,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vrgather.vi v10, v8, 9
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> undef,
+ <vscale x 8 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 9,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vrgather.vi v12, v8, 9
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> undef,
+ <vscale x 16 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 9,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vrgather.vi v16, v8, 9
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> undef,
+ <vscale x 32 x bfloat> %0,
+ iXLen 9,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 9,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
index 4749cc6..0d96fbf 100644
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -276,9 +276,8 @@ define i64 @sraiw_andi(i32 signext %0, i32 signext %1) nounwind {
; RV64-LABEL: sraiw_andi:
; RV64: # %bb.0: # %entry
; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: slli a0, a0, 32
-; RV64-NEXT: srai a0, a0, 2
-; RV64-NEXT: srli a0, a0, 61
+; RV64-NEXT: sraiw a0, a0, 31
+; RV64-NEXT: andi a0, a0, 7
; RV64-NEXT: ret
entry:
%3 = add i32 %0, %1
diff --git a/llvm/test/CodeGen/WebAssembly/offset.ll b/llvm/test/CodeGen/WebAssembly/offset.ll
index 65de341..763c60c 100644
--- a/llvm/test/CodeGen/WebAssembly/offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset.ll
@@ -40,6 +40,26 @@ define i32 @load_i32_with_folded_gep_offset(ptr %p) {
ret i32 %t
}
+; Same for nusw.
+
+; CHECK-LABEL: load_i32_with_folded_gep_offset_nusw:
+; CHECK: i32.load $push0=, 24($0){{$}}
+define i32 @load_i32_with_folded_gep_offset_nusw(ptr %p) {
+ %s = getelementptr nusw i32, ptr %p, i32 6
+ %t = load i32, ptr %s
+ ret i32 %t
+}
+
+; For nuw we don't need the offset to be positive.
+
+; CHECK-LABEL: load_i32_with_folded_gep_offset_nuw:
+; CHECK: i32.load $push0=, -24($0){{$}}
+define i32 @load_i32_with_folded_gep_offset_nuw(ptr %p) {
+ %s = getelementptr nuw i32, ptr %p, i32 -6
+ %t = load i32, ptr %s
+ ret i32 %t
+}
+
; We can't fold a negative offset though, even with an inbounds gep.
; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
index 29d3c27..98b8638 100644
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -26,7 +26,6 @@
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/X86/apx/and.ll b/llvm/test/CodeGen/X86/apx/and.ll
index 51858ad..23aed77 100644
--- a/llvm/test/CodeGen/X86/apx/and.ll
+++ b/llvm/test/CodeGen/X86/apx/and.ll
@@ -482,17 +482,17 @@ define i1 @andflag16rr(i16 %a, i16 %b) {
define i1 @andflag32rr(i32 %a, i32 %b) {
; CHECK-LABEL: andflag32rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: andl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x21,0xf7]
+; CHECK-NEXT: andl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x21,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag32rr:
; NF: # %bb.0:
-; NF-NEXT: andl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x21,0xf7]
+; NF-NEXT: andl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x21,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i32 %a, %b ; 0xff << 50
@@ -504,17 +504,17 @@ define i1 @andflag32rr(i32 %a, i32 %b) {
define i1 @andflag64rr(i64 %a, i64 %b) {
; CHECK-LABEL: andflag64rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: andq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x21,0xf7]
+; CHECK-NEXT: andq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x21,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag64rr:
; NF: # %bb.0:
-; NF-NEXT: andq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x21,0xf7]
+; NF-NEXT: andq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x21,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, %b ; 0xff << 50
@@ -578,17 +578,17 @@ define i1 @andflag16rm(ptr %ptr, i16 %b) {
define i1 @andflag32rm(ptr %ptr, i32 %b) {
; CHECK-LABEL: andflag32rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: andl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x23,0x37]
+; CHECK-NEXT: andl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x23,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag32rm:
; NF: # %bb.0:
-; NF-NEXT: andl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x23,0x37]
+; NF-NEXT: andl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x23,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i32, ptr %ptr
@@ -601,17 +601,17 @@ define i1 @andflag32rm(ptr %ptr, i32 %b) {
define i1 @andflag64rm(ptr %ptr, i64 %b) {
; CHECK-LABEL: andflag64rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: andq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x23,0x37]
+; CHECK-NEXT: andq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x23,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag64rm:
; NF: # %bb.0:
-; NF-NEXT: andq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x23,0x37]
+; NF-NEXT: andq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x23,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i64, ptr %ptr
@@ -672,19 +672,19 @@ define i1 @andflag16ri(i16 %a) {
define i1 @andflag32ri(i32 %a) {
; CHECK-LABEL: andflag32ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: andl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xe7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: andl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xe7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag32ri:
; NF: # %bb.0:
-; NF-NEXT: andl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xe7,0x40,0xe2,0x01,0x00]
+; NF-NEXT: andl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xe7,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i32 %a, 123456 ; 0xff << 50
@@ -696,19 +696,19 @@ define i1 @andflag32ri(i32 %a) {
define i1 @andflag64ri(i64 %a) {
; CHECK-LABEL: andflag64ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: andq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xe7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: andq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xe7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag64ri:
; NF: # %bb.0:
-; NF-NEXT: andq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xe7,0x40,0xe2,0x01,0x00]
+; NF-NEXT: andq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xe7,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 123456 ; 0xff << 50
@@ -743,17 +743,17 @@ define i1 @andflag16ri8(i16 %a) {
define i1 @andflag32ri8(i32 %a) {
; CHECK-LABEL: andflag32ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xe7,0x7b]
+; CHECK-NEXT: andl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xe7,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag32ri8:
; NF: # %bb.0:
-; NF-NEXT: andl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xe7,0x7b]
+; NF-NEXT: andl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xe7,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i32 %a, 123 ; 0xff << 50
@@ -765,17 +765,17 @@ define i1 @andflag32ri8(i32 %a) {
define i1 @andflag64ri8(i64 %a) {
; CHECK-LABEL: andflag64ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xe7,0x7b]
+; CHECK-NEXT: andq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xe7,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: andflag64ri8:
; NF: # %bb.0:
-; NF-NEXT: andq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xe7,0x7b]
+; NF-NEXT: andq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xe7,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 123 ; 0xff << 50
diff --git a/llvm/test/CodeGen/X86/apx/cmov.ll b/llvm/test/CodeGen/X86/apx/cmov.ll
index 7a6a63f..7b84612 100644
--- a/llvm/test/CodeGen/X86/apx/cmov.ll
+++ b/llvm/test/CodeGen/X86/apx/cmov.ll
@@ -5,10 +5,10 @@ define i8 @cmov8(i8 %a, i8 %b, i8 %x, ptr %y.ptr) {
; CHECK-LABEL: cmov8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
-; CHECK-NEXT: cmoval %edi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x47,0xd7]
-; CHECK-NEXT: movzbl (%rcx), %ecx # encoding: [0x0f,0xb6,0x09]
-; CHECK-NEXT: cmovbel %edx, %ecx # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xca]
-; CHECK-NEXT: addb %cl, %al # EVEX TO LEGACY Compression encoding: [0x00,0xc8]
+; CHECK-NEXT: cmovbel %edx, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xfa]
+; CHECK-NEXT: movzbl (%rcx), %eax # encoding: [0x0f,0xb6,0x01]
+; CHECK-NEXT: cmovbel %edx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xc2]
+; CHECK-NEXT: addb %dil, %al # EVEX TO LEGACY Compression encoding: [0x40,0x00,0xf8]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%cond = icmp ugt i8 %a, %b
@@ -23,9 +23,9 @@ define i16 @cmov16(i16 %a, i16 %b, i16 %x, ptr %y.ptr) {
; CHECK-LABEL: cmov16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
-; CHECK-NEXT: cmoval %edi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x47,0xd7]
-; CHECK-NEXT: cmovaw (%rcx), %dx, %cx # encoding: [0x62,0xf4,0x75,0x18,0x47,0x11]
-; CHECK-NEXT: addw %cx, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x01,0xc8]
+; CHECK-NEXT: cmovbel %edx, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xfa]
+; CHECK-NEXT: cmovaw (%rcx), %dx, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x47,0x11]
+; CHECK-NEXT: addw %di, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x01,0xf8]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%cond = icmp ugt i16 %a, %b
@@ -41,8 +41,8 @@ define i32 @cmov32(i32 %a, i32 %b, i32 %x, ptr %y.ptr) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
; CHECK-NEXT: cmoval %edi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x47,0xd7]
-; CHECK-NEXT: cmoval (%rcx), %edx, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x47,0x11]
-; CHECK-NEXT: addl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x01,0xc8]
+; CHECK-NEXT: cmoval (%rcx), %edx # EVEX TO LEGACY Compression encoding: [0x0f,0x47,0x11]
+; CHECK-NEXT: addl %edx, %eax # EVEX TO LEGACY Compression encoding: [0x01,0xd0]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%cond = icmp ugt i32 %a, %b
@@ -58,8 +58,8 @@ define i64 @cmov64(i64 %a, i64 %b, i64 %x, ptr %y.ptr) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
; CHECK-NEXT: cmovaq %rdi, %rdx, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x47,0xd7]
-; CHECK-NEXT: cmovaq (%rcx), %rdx, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x47,0x11]
-; CHECK-NEXT: addq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x01,0xc8]
+; CHECK-NEXT: cmovaq (%rcx), %rdx # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x47,0x11]
+; CHECK-NEXT: addq %rdx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x01,0xd0]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%cond = icmp ugt i64 %a, %b
diff --git a/llvm/test/CodeGen/X86/apx/mul-i1024.ll b/llvm/test/CodeGen/X86/apx/mul-i1024.ll
index 2b99c44..a4d15a1 100644
--- a/llvm/test/CodeGen/X86/apx/mul-i1024.ll
+++ b/llvm/test/CodeGen/X86/apx/mul-i1024.ll
@@ -1041,41 +1041,41 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: pushq %r13
; EGPR-NDD-NEXT: pushq %r12
; EGPR-NDD-NEXT: pushq %rbx
-; EGPR-NDD-NEXT: subq $104, %rsp
+; EGPR-NDD-NEXT: subq $96, %rsp
; EGPR-NDD-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %rsi, %r15
; EGPR-NDD-NEXT: movq %rdi, %r20
-; EGPR-NDD-NEXT: movq (%rdi), %r16
-; EGPR-NDD-NEXT: movq 8(%rdi), %r14
+; EGPR-NDD-NEXT: movq (%rdi), %r17
+; EGPR-NDD-NEXT: movq 8(%rdi), %r11
; EGPR-NDD-NEXT: movq 24(%rdi), %r9
; EGPR-NDD-NEXT: movq 16(%rdi), %r10
; EGPR-NDD-NEXT: movq 40(%rdi), %rdi
-; EGPR-NDD-NEXT: movq 32(%r20), %r11
-; EGPR-NDD-NEXT: movq 56(%r20), %r17
-; EGPR-NDD-NEXT: movq 48(%r20), %r15
-; EGPR-NDD-NEXT: movq 24(%rsi), %r18
+; EGPR-NDD-NEXT: movq 32(%r20), %r16
+; EGPR-NDD-NEXT: movq 56(%r20), %r18
+; EGPR-NDD-NEXT: movq 48(%r20), %r23
+; EGPR-NDD-NEXT: movq 24(%rsi), %r14
; EGPR-NDD-NEXT: movq 16(%rsi), %r24
; EGPR-NDD-NEXT: movq (%rsi), %r22
; EGPR-NDD-NEXT: movq 8(%rsi), %r21
-; EGPR-NDD-NEXT: movq %rsi, %r23
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: movq %rdx, %r25
; EGPR-NDD-NEXT: movq %rax, %r19
-; EGPR-NDD-NEXT: movq %r17, %rax
+; EGPR-NDD-NEXT: movq %r18, %rax
; EGPR-NDD-NEXT: mulq %r22
-; EGPR-NDD-NEXT: addq %r25, %rax, %rcx
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rsi
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: addq %rax, %r25
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %rcx, %rax, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %rsi, %rcx
+; EGPR-NDD-NEXT: addq %r25, %rax, %rsi
+; EGPR-NDD-NEXT: adcq %rdx, %rcx
; EGPR-NDD-NEXT: setb %al
-; EGPR-NDD-NEXT: movzbl %al, %esi
-; EGPR-NDD-NEXT: movq %r17, %rax
+; EGPR-NDD-NEXT: movzbl %al, %r8d
+; EGPR-NDD-NEXT: movq %r18, %rax
; EGPR-NDD-NEXT: mulq %r21
; EGPR-NDD-NEXT: addq %rcx, %rax, %r27
-; EGPR-NDD-NEXT: adcq %rdx, %rsi
-; EGPR-NDD-NEXT: movq %r11, %rax
+; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: movq %r16, %rax
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: movq %rdx, %r26
; EGPR-NDD-NEXT: movq %rax, %r25
@@ -1083,7 +1083,7 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: addq %r26, %rax, %rcx
; EGPR-NDD-NEXT: adcq $0, %rdx, %r26
-; EGPR-NDD-NEXT: movq %r11, %rax
+; EGPR-NDD-NEXT: movq %r16, %rax
; EGPR-NDD-NEXT: mulq %r21
; EGPR-NDD-NEXT: addq %rax, %rcx
; EGPR-NDD-NEXT: adcq %rdx, %r26
@@ -1094,58 +1094,59 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: addq %r26, %rax
; EGPR-NDD-NEXT: adcq %r28, %rdx
; EGPR-NDD-NEXT: addq %rax, %r19, %r28
-; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: adcq %rdx, %rsi, %r29
; EGPR-NDD-NEXT: adcq $0, %r27
-; EGPR-NDD-NEXT: adcq $0, %rsi, %r29
-; EGPR-NDD-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq %r11, %rax
+; EGPR-NDD-NEXT: adcq $0, %r8
+; EGPR-NDD-NEXT: movq %r16, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r16, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: movq %rdx, %r19
; EGPR-NDD-NEXT: movq %rax, %r26
; EGPR-NDD-NEXT: movq %rdi, %rax
; EGPR-NDD-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: mulq %r24
-; EGPR-NDD-NEXT: addq %r19, %rax, %rsi
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r19
-; EGPR-NDD-NEXT: movq %r11, %rax
-; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %rsi, %rax, %r30
-; EGPR-NDD-NEXT: adcq %rdx, %r19, %rsi
+; EGPR-NDD-NEXT: addq %rax, %r19
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rsi
+; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: mulq %r14
+; EGPR-NDD-NEXT: addq %rax, %r19
+; EGPR-NDD-NEXT: adcq %rdx, %rsi
; EGPR-NDD-NEXT: setb %al
-; EGPR-NDD-NEXT: movzbl %al, %r19d
+; EGPR-NDD-NEXT: movzbl %al, %r30d
; EGPR-NDD-NEXT: movq %rdi, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %rsi, %rax
-; EGPR-NDD-NEXT: adcq %r19, %rdx
+; EGPR-NDD-NEXT: adcq %r30, %rdx
; EGPR-NDD-NEXT: addq %r28, %r26, %rsi
-; EGPR-NDD-NEXT: adcq %r8, %r30, %r28
+; EGPR-NDD-NEXT: adcq %r29, %r19, %r28
; EGPR-NDD-NEXT: adcq $0, %rax
; EGPR-NDD-NEXT: adcq $0, %rdx
-; EGPR-NDD-NEXT: addq %rax, %r27, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %r29, %r27
+; EGPR-NDD-NEXT: addq %rax, %r27
+; EGPR-NDD-NEXT: adcq %rdx, %r8
; EGPR-NDD-NEXT: setb %al
; EGPR-NDD-NEXT: movzbl %al, %r31d
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: movq %r23, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: movq %rdx, %r19
; EGPR-NDD-NEXT: movq %rax, %r26
-; EGPR-NDD-NEXT: movq %r17, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq %r17, %rax
+; EGPR-NDD-NEXT: movq %r18, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r18, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: addq %rax, %r19
; EGPR-NDD-NEXT: adcq $0, %rdx, %r29
-; EGPR-NDD-NEXT: movq %r15, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: movq %r23, %rax
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %rax, %r19
; EGPR-NDD-NEXT: adcq %rdx, %r29
; EGPR-NDD-NEXT: setb %al
; EGPR-NDD-NEXT: movzbl %al, %r30d
-; EGPR-NDD-NEXT: movq %r17, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: movq %r18, %rax
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %r29, %rax
; EGPR-NDD-NEXT: adcq %r30, %rdx
-; EGPR-NDD-NEXT: addq %r8, %r26, %r29
-; EGPR-NDD-NEXT: adcq %r27, %r19, %r30
+; EGPR-NDD-NEXT: addq %r27, %r26, %r29
+; EGPR-NDD-NEXT: adcq %r8, %r19, %r30
; EGPR-NDD-NEXT: adcq %rax, %r31
; EGPR-NDD-NEXT: adcq $0, %rdx, %rdi
; EGPR-NDD-NEXT: movq %r10, %rax
@@ -1154,69 +1155,69 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: movq %rax, %r26
; EGPR-NDD-NEXT: movq %r9, %rax
; EGPR-NDD-NEXT: mulq %r22
-; EGPR-NDD-NEXT: addq %r19, %rax, %r8
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r19
+; EGPR-NDD-NEXT: addq %rax, %r19
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
; EGPR-NDD-NEXT: movq %r10, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %rax, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %r19
+; EGPR-NDD-NEXT: addq %rax, %r19
+; EGPR-NDD-NEXT: adcq %rdx, %r8
; EGPR-NDD-NEXT: setb %al
; EGPR-NDD-NEXT: movzbl %al, %r27d
; EGPR-NDD-NEXT: movq %r9, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %rax, %r19
+; EGPR-NDD-NEXT: addq %rax, %r8
; EGPR-NDD-NEXT: adcq %r27, %rdx, %rbx
-; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: movq %rdx, %r27
; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: addq %rax, %r27
; EGPR-NDD-NEXT: adcq $0, %rdx, %r12
-; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r21
; EGPR-NDD-NEXT: addq %r27, %rax
; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %rdx, %r12, %r27
-; EGPR-NDD-NEXT: setb %bpl
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: adcq %rdx, %r12
+; EGPR-NDD-NEXT: setb %r27b
+; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %r27, %rax
-; EGPR-NDD-NEXT: movzbl %bpl, %r27d
+; EGPR-NDD-NEXT: addq %r12, %rax
+; EGPR-NDD-NEXT: movzbl %r27b, %r27d
; EGPR-NDD-NEXT: adcq %r27, %rdx
; EGPR-NDD-NEXT: addq %rax, %r26, %r12
-; EGPR-NDD-NEXT: adcq %rdx, %r8
-; EGPR-NDD-NEXT: adcq $0, %r19
+; EGPR-NDD-NEXT: adcq %rdx, %r19
+; EGPR-NDD-NEXT: adcq $0, %r8
; EGPR-NDD-NEXT: adcq $0, %rbx
-; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: movq %rdx, %r26
; EGPR-NDD-NEXT: movq %rax, %r27
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: addq %rax, %r26
; EGPR-NDD-NEXT: adcq $0, %rdx, %r13
-; EGPR-NDD-NEXT: movq %r16, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: movq %r17, %rax
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %rax, %r26
; EGPR-NDD-NEXT: adcq %rdx, %r13
; EGPR-NDD-NEXT: setb %bpl
-; EGPR-NDD-NEXT: movq %r14, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: movq %r11, %rax
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %r13, %rax
; EGPR-NDD-NEXT: movzbl %bpl, %r13d
; EGPR-NDD-NEXT: adcq %r13, %rdx
-; EGPR-NDD-NEXT: addq %r12, %r27, %r11
-; EGPR-NDD-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r26, %r8
-; EGPR-NDD-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: addq %r12, %r27
+; EGPR-NDD-NEXT: movq %r27, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %r26, %r19
+; EGPR-NDD-NEXT: movq %r19, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: adcq $0, %rax
; EGPR-NDD-NEXT: adcq $0, %rdx
-; EGPR-NDD-NEXT: addq %rax, %r19, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %rbx, %r19
-; EGPR-NDD-NEXT: setb %bl
-; EGPR-NDD-NEXT: movq %r10, %r17
+; EGPR-NDD-NEXT: addq %rax, %r8
+; EGPR-NDD-NEXT: adcq %rdx, %rbx
+; EGPR-NDD-NEXT: setb %r19b
+; EGPR-NDD-NEXT: movq %r10, %r16
; EGPR-NDD-NEXT: movq %r10, %rax
; EGPR-NDD-NEXT: mulq %r24
; EGPR-NDD-NEXT: movq %rdx, %r26
@@ -1226,32 +1227,31 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: addq %rax, %r26
; EGPR-NDD-NEXT: adcq $0, %rdx, %r12
; EGPR-NDD-NEXT: movq %r10, %rax
-; EGPR-NDD-NEXT: movq %r18, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %rax, %r26
; EGPR-NDD-NEXT: adcq %rdx, %r12
; EGPR-NDD-NEXT: setb %bpl
; EGPR-NDD-NEXT: movq %r9, %rax
-; EGPR-NDD-NEXT: mulq %r18
+; EGPR-NDD-NEXT: mulq %r14
; EGPR-NDD-NEXT: addq %r12, %rax
; EGPR-NDD-NEXT: movzbl %bpl, %r12d
; EGPR-NDD-NEXT: adcq %r12, %rdx
; EGPR-NDD-NEXT: addq %r27, %r8
-; EGPR-NDD-NEXT: adcq %r26, %r19
-; EGPR-NDD-NEXT: movzbl %bl, %r26d
-; EGPR-NDD-NEXT: adcq %r26, %rax
+; EGPR-NDD-NEXT: adcq %r26, %rbx
+; EGPR-NDD-NEXT: movzbl %r19b, %r19d
+; EGPR-NDD-NEXT: adcq %r19, %rax
; EGPR-NDD-NEXT: adcq $0, %rdx
; EGPR-NDD-NEXT: addq %r8, %r25, %r12
-; EGPR-NDD-NEXT: movq 32(%r23), %r26
-; EGPR-NDD-NEXT: adcq %r19, %rcx, %r13
+; EGPR-NDD-NEXT: movq 32(%r15), %r26
+; EGPR-NDD-NEXT: adcq %rbx, %rcx, %r13
; EGPR-NDD-NEXT: adcq %rax, %rsi, %rbp
; EGPR-NDD-NEXT: adcq %rdx, %r28, %rbx
-; EGPR-NDD-NEXT: adcq $0, %r29, %rax
-; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq $0, %r29
+; EGPR-NDD-NEXT: movq %r29, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: adcq $0, %r30
; EGPR-NDD-NEXT: adcq $0, %r31
-; EGPR-NDD-NEXT: adcq $0, %rdi, %rax
-; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq $0, %rdi
+; EGPR-NDD-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: movq %r10, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r25
@@ -1259,341 +1259,333 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: movq %r9, %r19
; EGPR-NDD-NEXT: movq %r9, %rax
; EGPR-NDD-NEXT: mulq %r26
-; EGPR-NDD-NEXT: addq %r25, %rax, %rcx
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
-; EGPR-NDD-NEXT: movq 40(%r23), %r18
-; EGPR-NDD-NEXT: movq %r23, %r11
+; EGPR-NDD-NEXT: addq %rax, %r25
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
+; EGPR-NDD-NEXT: movq 40(%r15), %r18
; EGPR-NDD-NEXT: movq %r10, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %rcx, %rax, %rdi
-; EGPR-NDD-NEXT: adcq %rdx, %r8
-; EGPR-NDD-NEXT: setb %r25b
+; EGPR-NDD-NEXT: addq %r25, %rax, %r29
+; EGPR-NDD-NEXT: adcq %rdx, %rcx
+; EGPR-NDD-NEXT: setb %r8b
; EGPR-NDD-NEXT: movq %r9, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %r8, %rax, %r29
-; EGPR-NDD-NEXT: movzbl %r25b, %eax
+; EGPR-NDD-NEXT: addq %rcx, %rax, %rdi
+; EGPR-NDD-NEXT: movzbl %r8b, %eax
; EGPR-NDD-NEXT: adcq %rax, %rdx, %rsi
-; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r28
; EGPR-NDD-NEXT: movq %rax, %r25
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: movq %r11, %r10
+; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: addq %r28, %rax, %r8
; EGPR-NDD-NEXT: adcq $0, %rdx, %r28
-; EGPR-NDD-NEXT: movq %r16, %rax
-; EGPR-NDD-NEXT: movq %r16, %r10
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r18
; EGPR-NDD-NEXT: addq %r8, %rax, %r23
; EGPR-NDD-NEXT: adcq %rdx, %r28
; EGPR-NDD-NEXT: setb %cl
-; EGPR-NDD-NEXT: movq %r14, %rax
-; EGPR-NDD-NEXT: movq %r14, %r16
+; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r18
; EGPR-NDD-NEXT: addq %r28, %rax
; EGPR-NDD-NEXT: movzbl %cl, %ecx
; EGPR-NDD-NEXT: adcq %rdx, %rcx
; EGPR-NDD-NEXT: addq %rax, %r27
-; EGPR-NDD-NEXT: adcq %rcx, %rdi
-; EGPR-NDD-NEXT: adcq $0, %r29, %r8
+; EGPR-NDD-NEXT: adcq %rcx, %r29, %r8
+; EGPR-NDD-NEXT: adcq $0, %rdi
; EGPR-NDD-NEXT: adcq $0, %rsi, %r9
-; EGPR-NDD-NEXT: movq %r11, %r14
-; EGPR-NDD-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq 48(%r11), %r11
-; EGPR-NDD-NEXT: movq %r10, %rsi
-; EGPR-NDD-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq %r10, %rax
+; EGPR-NDD-NEXT: movq 48(%r15), %r11
+; EGPR-NDD-NEXT: movq %r17, %rsi
+; EGPR-NDD-NEXT: movq %r17, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r17, %rax
; EGPR-NDD-NEXT: mulq %r11
; EGPR-NDD-NEXT: movq %rdx, %r28
; EGPR-NDD-NEXT: movq %rax, %r29
-; EGPR-NDD-NEXT: movq %r16, %rax
-; EGPR-NDD-NEXT: movq %r16, %r10
-; EGPR-NDD-NEXT: movq %r16, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r10, %rax
+; EGPR-NDD-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: mulq %r11
; EGPR-NDD-NEXT: addq %rax, %r28
; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
-; EGPR-NDD-NEXT: movq 56(%r14), %r16
+; EGPR-NDD-NEXT: movq 56(%r15), %r17
; EGPR-NDD-NEXT: movq %rsi, %rax
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: mulq %r17
; EGPR-NDD-NEXT: addq %rax, %r28
; EGPR-NDD-NEXT: adcq %rdx, %rcx
; EGPR-NDD-NEXT: setb %sil
; EGPR-NDD-NEXT: movq %r10, %rax
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: mulq %r17
; EGPR-NDD-NEXT: addq %rcx, %rax
; EGPR-NDD-NEXT: movzbl %sil, %ecx
; EGPR-NDD-NEXT: adcq %rdx, %rcx
-; EGPR-NDD-NEXT: addq %r27, %r29, %r10
-; EGPR-NDD-NEXT: adcq %r28, %rdi
+; EGPR-NDD-NEXT: addq %r29, %r27
+; EGPR-NDD-NEXT: adcq %r8, %r28, %r10
; EGPR-NDD-NEXT: adcq $0, %rax
; EGPR-NDD-NEXT: adcq $0, %rcx
-; EGPR-NDD-NEXT: addq %rax, %r8
-; EGPR-NDD-NEXT: adcq %rcx, %r9, %rsi
-; EGPR-NDD-NEXT: setb %r9b
-; EGPR-NDD-NEXT: movq %r17, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movq %r17, %rax
+; EGPR-NDD-NEXT: addq %rax, %rdi
+; EGPR-NDD-NEXT: adcq %rcx, %r9, %r8
+; EGPR-NDD-NEXT: setb %sil
+; EGPR-NDD-NEXT: movq %r16, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: movq %r16, %rax
; EGPR-NDD-NEXT: mulq %r11
; EGPR-NDD-NEXT: movq %rdx, %r28
; EGPR-NDD-NEXT: movq %rax, %r29
+; EGPR-NDD-NEXT: movq %r19, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: movq %r19, %rax
; EGPR-NDD-NEXT: mulq %r11
-; EGPR-NDD-NEXT: addq %r28, %rax, %r27
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r28
-; EGPR-NDD-NEXT: movq %r17, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %rax, %r27
-; EGPR-NDD-NEXT: adcq %rdx, %r28
+; EGPR-NDD-NEXT: addq %rax, %r28
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
+; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: mulq %r17
+; EGPR-NDD-NEXT: addq %rax, %r28
+; EGPR-NDD-NEXT: adcq %rdx, %r9
; EGPR-NDD-NEXT: setb %cl
; EGPR-NDD-NEXT: movq %r19, %rax
-; EGPR-NDD-NEXT: movq %r19, %r17
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %r28, %rax
+; EGPR-NDD-NEXT: mulq %r17
+; EGPR-NDD-NEXT: addq %r9, %rax
; EGPR-NDD-NEXT: movzbl %cl, %ecx
; EGPR-NDD-NEXT: adcq %rdx, %rcx
-; EGPR-NDD-NEXT: addq %r8, %r29, %rdx
-; EGPR-NDD-NEXT: adcq %r27, %rsi
-; EGPR-NDD-NEXT: movzbl %r9b, %r8d
-; EGPR-NDD-NEXT: adcq %r8, %rax
+; EGPR-NDD-NEXT: addq %r29, %rdi
+; EGPR-NDD-NEXT: adcq %r28, %r8
+; EGPR-NDD-NEXT: movzbl %sil, %edx
+; EGPR-NDD-NEXT: adcq %rdx, %rax
; EGPR-NDD-NEXT: adcq $0, %rcx
-; EGPR-NDD-NEXT: addq %r12, %r25, %r8
-; EGPR-NDD-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r13, %r23, %r8
-; EGPR-NDD-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %rbp, %r10, %r8
-; EGPR-NDD-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %rbx, %rdi
-; EGPR-NDD-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq $0, %rdx
-; EGPR-NDD-NEXT: adcq $0, %rsi
+; EGPR-NDD-NEXT: addq %r12, %r25
+; EGPR-NDD-NEXT: movq %r25, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %r13, %r23, %r19
+; EGPR-NDD-NEXT: movq %r19, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %rbp, %r27
+; EGPR-NDD-NEXT: movq %r27, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %rbx, %r10
+; EGPR-NDD-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq $0, %rdi
+; EGPR-NDD-NEXT: adcq $0, %r8
; EGPR-NDD-NEXT: adcq $0, %rax
-; EGPR-NDD-NEXT: adcq $0, %rcx, %rdi
-; EGPR-NDD-NEXT: addq %rdx, {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %rsi, %r30, %r19
-; EGPR-NDD-NEXT: adcq %rax, %r31, %r30
-; EGPR-NDD-NEXT: adcq %rdi, {{[-0-9]+}}(%r{{[sb]}}p), %r31 # 8-byte Folded Reload
-; EGPR-NDD-NEXT: setb %bpl
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: adcq $0, %rcx
+; EGPR-NDD-NEXT: addq %rdi, {{[-0-9]+}}(%r{{[sb]}}p), %r19 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %r8, %r30
+; EGPR-NDD-NEXT: adcq %rax, %r31
+; EGPR-NDD-NEXT: adcq %rcx, {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; EGPR-NDD-NEXT: setb %r8b
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; EGPR-NDD-NEXT: movq %r13, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r25
; EGPR-NDD-NEXT: movq %rax, %r28
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r9, %rax
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; EGPR-NDD-NEXT: movq %r10, %rax
; EGPR-NDD-NEXT: mulq %r26
-; EGPR-NDD-NEXT: addq %r25, %rax, %rsi
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rdi
-; EGPR-NDD-NEXT: movq %r15, %rax
-; EGPR-NDD-NEXT: movq %r15, %r13
+; EGPR-NDD-NEXT: addq %rax, %r25
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rsi
+; EGPR-NDD-NEXT: movq %r13, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: adcq %rdx, %rdi
-; EGPR-NDD-NEXT: setb %r8b
-; EGPR-NDD-NEXT: movq %r9, %rax
-; EGPR-NDD-NEXT: movq %r9, %r23
+; EGPR-NDD-NEXT: addq %r25, %rax, %rdi
+; EGPR-NDD-NEXT: adcq %rdx, %rsi
+; EGPR-NDD-NEXT: setb %r9b
+; EGPR-NDD-NEXT: movq %r10, %rax
+; EGPR-NDD-NEXT: movq %r10, %r16
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %rax, %rdi
-; EGPR-NDD-NEXT: movzbl %r8b, %eax
-; EGPR-NDD-NEXT: adcq %rax, %rdx, %r8
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: addq %rax, %rsi
+; EGPR-NDD-NEXT: movzbl %r9b, %eax
+; EGPR-NDD-NEXT: adcq %rax, %rdx, %r9
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r23 # 8-byte Reload
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r29
; EGPR-NDD-NEXT: movq %rax, %r25
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; EGPR-NDD-NEXT: movq %r12, %rax
; EGPR-NDD-NEXT: mulq %r26
-; EGPR-NDD-NEXT: addq %r29, %rax, %r9
+; EGPR-NDD-NEXT: addq %rax, %r29
; EGPR-NDD-NEXT: adcq $0, %rdx, %r10
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %r9, %rax, %rbx
-; EGPR-NDD-NEXT: adcq %rdx, %r10, %r9
-; EGPR-NDD-NEXT: setb %r10b
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: addq %r29, %rax, %rbx
+; EGPR-NDD-NEXT: adcq %rdx, %r10
+; EGPR-NDD-NEXT: setb %r27b
+; EGPR-NDD-NEXT: movq %r12, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %r9, %rax
-; EGPR-NDD-NEXT: movzbl %r10b, %r9d
-; EGPR-NDD-NEXT: adcq %r9, %rdx
-; EGPR-NDD-NEXT: addq %rax, %r28, %r9
-; EGPR-NDD-NEXT: adcq %rdx, %rsi
-; EGPR-NDD-NEXT: adcq $0, %rdi
-; EGPR-NDD-NEXT: adcq $0, %r8
-; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: addq %r10, %rax
+; EGPR-NDD-NEXT: movzbl %r27b, %r10d
+; EGPR-NDD-NEXT: adcq %r10, %rdx
+; EGPR-NDD-NEXT: addq %rax, %r28, %r10
+; EGPR-NDD-NEXT: adcq %rdx, %rdi
+; EGPR-NDD-NEXT: adcq $0, %rsi
+; EGPR-NDD-NEXT: adcq $0, %r9
+; EGPR-NDD-NEXT: movq %r23, %rax
; EGPR-NDD-NEXT: mulq %r11
; EGPR-NDD-NEXT: movq %rdx, %r28
; EGPR-NDD-NEXT: movq %rax, %r29
-; EGPR-NDD-NEXT: movq %r15, %rax
+; EGPR-NDD-NEXT: movq %r12, %rax
; EGPR-NDD-NEXT: mulq %r11
-; EGPR-NDD-NEXT: addq %r28, %rax, %r10
+; EGPR-NDD-NEXT: addq %rax, %r28
; EGPR-NDD-NEXT: adcq $0, %rdx, %r27
-; EGPR-NDD-NEXT: movq %r14, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %rax, %r10
+; EGPR-NDD-NEXT: movq %r23, %rax
+; EGPR-NDD-NEXT: mulq %r17
+; EGPR-NDD-NEXT: addq %rax, %r28
; EGPR-NDD-NEXT: adcq %rdx, %r27
-; EGPR-NDD-NEXT: setb %r28b
-; EGPR-NDD-NEXT: movq %r15, %rax
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: setb %bpl
+; EGPR-NDD-NEXT: movq %r12, %rax
+; EGPR-NDD-NEXT: mulq %r17
; EGPR-NDD-NEXT: addq %r27, %rax
-; EGPR-NDD-NEXT: movzbl %r28b, %r27d
+; EGPR-NDD-NEXT: movzbl %bpl, %r27d
; EGPR-NDD-NEXT: adcq %r27, %rdx
-; EGPR-NDD-NEXT: addq %r29, %r9
-; EGPR-NDD-NEXT: adcq %r10, %rsi
+; EGPR-NDD-NEXT: addq %r29, %r10
+; EGPR-NDD-NEXT: adcq %r28, %rdi
; EGPR-NDD-NEXT: adcq $0, %rax
; EGPR-NDD-NEXT: adcq $0, %rdx
-; EGPR-NDD-NEXT: addq %rax, %rdi
-; EGPR-NDD-NEXT: adcq %rdx, %r8
-; EGPR-NDD-NEXT: setb %r10b
+; EGPR-NDD-NEXT: addq %rax, %rsi
+; EGPR-NDD-NEXT: adcq %rdx, %r9
+; EGPR-NDD-NEXT: setb %r27b
; EGPR-NDD-NEXT: movq %r13, %rax
; EGPR-NDD-NEXT: mulq %r11
; EGPR-NDD-NEXT: movq %rdx, %r28
; EGPR-NDD-NEXT: movq %rax, %r29
-; EGPR-NDD-NEXT: movq %r23, %r14
-; EGPR-NDD-NEXT: movq %r23, %rax
+; EGPR-NDD-NEXT: movq %r16, %rax
; EGPR-NDD-NEXT: mulq %r11
-; EGPR-NDD-NEXT: addq %r28, %rax, %r27
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r28
+; EGPR-NDD-NEXT: addq %rax, %r28
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r12
; EGPR-NDD-NEXT: movq %r13, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %rax, %r27
-; EGPR-NDD-NEXT: adcq %rdx, %r28
-; EGPR-NDD-NEXT: setb %r15b
-; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %r28, %rax
-; EGPR-NDD-NEXT: movzbl %r15b, %r28d
-; EGPR-NDD-NEXT: adcq %r28, %rdx
-; EGPR-NDD-NEXT: addq %r29, %rdi
-; EGPR-NDD-NEXT: adcq %r27, %r8
-; EGPR-NDD-NEXT: movzbl %r10b, %r10d
-; EGPR-NDD-NEXT: adcq %r10, %rax
+; EGPR-NDD-NEXT: mulq %r17
+; EGPR-NDD-NEXT: addq %rax, %r28
+; EGPR-NDD-NEXT: adcq %rdx, %r12
+; EGPR-NDD-NEXT: setb %bpl
+; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: mulq %r17
+; EGPR-NDD-NEXT: addq %r12, %rax
+; EGPR-NDD-NEXT: movzbl %bpl, %r12d
+; EGPR-NDD-NEXT: adcq %r12, %rdx
+; EGPR-NDD-NEXT: addq %r29, %rsi
+; EGPR-NDD-NEXT: adcq %r28, %r9
+; EGPR-NDD-NEXT: movzbl %r27b, %r27d
+; EGPR-NDD-NEXT: adcq %r27, %rax
; EGPR-NDD-NEXT: adcq $0, %rdx
-; EGPR-NDD-NEXT: addq %r25, %rcx
-; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r19, %rbx, %rcx
-; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r30, %r9, %rcx
-; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r31, %rsi, %rcx
-; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: movzbl %bpl, %ecx
+; EGPR-NDD-NEXT: addq %r25, %r19
+; EGPR-NDD-NEXT: movq %r19, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %rbx, %r30
+; EGPR-NDD-NEXT: movq %r30, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %r31, %r10
+; EGPR-NDD-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: adcq %rdi, %rcx
; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq $0, %r8, %rcx
+; EGPR-NDD-NEXT: movzbl %r8b, %ecx
+; EGPR-NDD-NEXT: adcq %rsi, %rcx
; EGPR-NDD-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq $0, %r9
+; EGPR-NDD-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: adcq $0, %rax
; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rax
-; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq $0, %rdx
+; EGPR-NDD-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: movq 64(%r20), %r28
; EGPR-NDD-NEXT: movq %r24, %rax
; EGPR-NDD-NEXT: mulq %r28
; EGPR-NDD-NEXT: movq %rdx, %r25
; EGPR-NDD-NEXT: movq %rax, %r30
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r23 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r23, %rax
+; EGPR-NDD-NEXT: movq %r14, %rax
; EGPR-NDD-NEXT: mulq %r28
-; EGPR-NDD-NEXT: addq %r25, %rax, %rcx
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rsi
+; EGPR-NDD-NEXT: addq %rax, %r25
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
; EGPR-NDD-NEXT: movq 72(%r20), %r29
; EGPR-NDD-NEXT: movq %r24, %rax
; EGPR-NDD-NEXT: mulq %r29
-; EGPR-NDD-NEXT: addq %rax, %rcx
-; EGPR-NDD-NEXT: adcq %rdx, %rsi
-; EGPR-NDD-NEXT: setb %dil
-; EGPR-NDD-NEXT: movq %r23, %rax
+; EGPR-NDD-NEXT: addq %rax, %r25
+; EGPR-NDD-NEXT: adcq %rdx, %rcx
+; EGPR-NDD-NEXT: setb %sil
+; EGPR-NDD-NEXT: movq %r14, %rax
; EGPR-NDD-NEXT: mulq %r29
-; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: movzbl %dil, %eax
-; EGPR-NDD-NEXT: adcq %rax, %rdx, %rdi
+; EGPR-NDD-NEXT: addq %rax, %rcx
+; EGPR-NDD-NEXT: movzbl %sil, %eax
+; EGPR-NDD-NEXT: adcq %rax, %rdx, %rsi
; EGPR-NDD-NEXT: movq %r22, %rax
; EGPR-NDD-NEXT: mulq %r28
; EGPR-NDD-NEXT: movq %rdx, %r31
; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; EGPR-NDD-NEXT: movq %r21, %rax
; EGPR-NDD-NEXT: mulq %r28
-; EGPR-NDD-NEXT: addq %r31, %rax, %r8
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
+; EGPR-NDD-NEXT: addq %rax, %r31
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rdi
; EGPR-NDD-NEXT: movq %r22, %rax
; EGPR-NDD-NEXT: mulq %r29
-; EGPR-NDD-NEXT: addq %r8, %rax
+; EGPR-NDD-NEXT: addq %r31, %rax
; EGPR-NDD-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %rdx, %r9, %r8
-; EGPR-NDD-NEXT: setb %r9b
+; EGPR-NDD-NEXT: adcq %rdx, %rdi
+; EGPR-NDD-NEXT: setb %r8b
; EGPR-NDD-NEXT: movq %r21, %rax
; EGPR-NDD-NEXT: mulq %r29
-; EGPR-NDD-NEXT: addq %r8, %rax
-; EGPR-NDD-NEXT: movzbl %r9b, %r8d
-; EGPR-NDD-NEXT: adcq %r8, %rdx
-; EGPR-NDD-NEXT: addq %rax, %r30, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %rcx
+; EGPR-NDD-NEXT: addq %rdi, %rax
+; EGPR-NDD-NEXT: movzbl %r8b, %edi
+; EGPR-NDD-NEXT: adcq %rdi, %rdx
+; EGPR-NDD-NEXT: addq %rax, %r30, %rdi
+; EGPR-NDD-NEXT: adcq %rdx, %r25
+; EGPR-NDD-NEXT: adcq $0, %rcx
; EGPR-NDD-NEXT: adcq $0, %rsi
-; EGPR-NDD-NEXT: adcq $0, %rdi
-; EGPR-NDD-NEXT: movq 80(%r20), %rbx
+; EGPR-NDD-NEXT: movq 80(%r20), %r8
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: mulq %r8
; EGPR-NDD-NEXT: movq %rdx, %r30
; EGPR-NDD-NEXT: movq %rax, %r31
; EGPR-NDD-NEXT: movq %r21, %rax
-; EGPR-NDD-NEXT: mulq %rbx
-; EGPR-NDD-NEXT: addq %r30, %rax, %r9
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r10
-; EGPR-NDD-NEXT: movq 88(%r20), %r15
+; EGPR-NDD-NEXT: mulq %r8
+; EGPR-NDD-NEXT: addq %rax, %r30
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
+; EGPR-NDD-NEXT: movq 88(%r20), %rbx
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: mulq %r15
-; EGPR-NDD-NEXT: addq %rax, %r9
-; EGPR-NDD-NEXT: adcq %rdx, %r10
-; EGPR-NDD-NEXT: setb %r19b
+; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: addq %rax, %r30
+; EGPR-NDD-NEXT: adcq %rdx, %r9
+; EGPR-NDD-NEXT: setb %r10b
; EGPR-NDD-NEXT: movq %r21, %rax
-; EGPR-NDD-NEXT: mulq %r15
-; EGPR-NDD-NEXT: addq %r10, %rax
-; EGPR-NDD-NEXT: movzbl %r19b, %r10d
-; EGPR-NDD-NEXT: adcq %r10, %rdx
-; EGPR-NDD-NEXT: addq %r31, %r8
-; EGPR-NDD-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NDD-NEXT: adcq %r9, %rcx
-; EGPR-NDD-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: addq %r9, %rax
+; EGPR-NDD-NEXT: movzbl %r10b, %r9d
+; EGPR-NDD-NEXT: adcq %r9, %rdx
+; EGPR-NDD-NEXT: addq %r31, %rdi
+; EGPR-NDD-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; EGPR-NDD-NEXT: adcq %r25, %r30, %rbp
; EGPR-NDD-NEXT: adcq $0, %rax
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
-; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: adcq %rdi, %rcx
+; EGPR-NDD-NEXT: adcq $0, %rdx
+; EGPR-NDD-NEXT: addq %rax, %rcx
+; EGPR-NDD-NEXT: adcq %rdx, %rsi
; EGPR-NDD-NEXT: setb %dil
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: mulq %r8
; EGPR-NDD-NEXT: movq %rdx, %r30
; EGPR-NDD-NEXT: movq %rax, %r31
-; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %rbx
-; EGPR-NDD-NEXT: addq %r30, %rax, %r8
+; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: mulq %r8
+; EGPR-NDD-NEXT: addq %rax, %r30
; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: mulq %r15
-; EGPR-NDD-NEXT: addq %rax, %r8
+; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: addq %rax, %r30
; EGPR-NDD-NEXT: adcq %rdx, %r9
; EGPR-NDD-NEXT: setb %r10b
-; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r15
+; EGPR-NDD-NEXT: movq %r14, %rax
+; EGPR-NDD-NEXT: mulq %rbx
; EGPR-NDD-NEXT: addq %r9, %rax
; EGPR-NDD-NEXT: movzbl %r10b, %r9d
; EGPR-NDD-NEXT: adcq %r9, %rdx
-; EGPR-NDD-NEXT: addq %rsi, %r31, %r25
-; EGPR-NDD-NEXT: adcq %rcx, %r8, %r19
-; EGPR-NDD-NEXT: movzbl %dil, %ecx
-; EGPR-NDD-NEXT: adcq %rcx, %rax, %r31
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r12
-; EGPR-NDD-NEXT: imulq %r15, %r26, %rcx
+; EGPR-NDD-NEXT: addq %rcx, %r31, %r25
+; EGPR-NDD-NEXT: adcq %rsi, %r30, %r12
+; EGPR-NDD-NEXT: movzbl %dil, %r19d
+; EGPR-NDD-NEXT: adcq %rax, %r19
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r31
+; EGPR-NDD-NEXT: imulq %r26, %rbx
; EGPR-NDD-NEXT: movq %r26, %rax
-; EGPR-NDD-NEXT: mulq %rbx
+; EGPR-NDD-NEXT: mulq %r8
; EGPR-NDD-NEXT: movq %rax, %r30
-; EGPR-NDD-NEXT: addq %rcx, %rdx, %rax
-; EGPR-NDD-NEXT: imulq %rbx, %r18, %rcx
-; EGPR-NDD-NEXT: addq %rax, %rcx
-; EGPR-NDD-NEXT: imulq %r29, %r11, %rsi
+; EGPR-NDD-NEXT: addq %rbx, %rdx
+; EGPR-NDD-NEXT: imulq %r18, %r8
+; EGPR-NDD-NEXT: addq %rdx, %r8
+; EGPR-NDD-NEXT: imulq %r29, %r11, %rcx
; EGPR-NDD-NEXT: movq %r11, %rax
; EGPR-NDD-NEXT: mulq %r28
-; EGPR-NDD-NEXT: addq %rsi, %rdx
-; EGPR-NDD-NEXT: imulq %r28, %r16, %rsi
-; EGPR-NDD-NEXT: addq %rsi, %rdx
+; EGPR-NDD-NEXT: addq %rdx, %rcx
+; EGPR-NDD-NEXT: imulq %r28, %r17, %r16
+; EGPR-NDD-NEXT: addq %r16, %rcx
; EGPR-NDD-NEXT: addq %r30, %rax, %rsi
-; EGPR-NDD-NEXT: adcq %rcx, %rdx, %rdi
+; EGPR-NDD-NEXT: adcq %rcx, %r8
; EGPR-NDD-NEXT: movq %r28, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r30
@@ -1601,215 +1593,215 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: movq %r29, %rax
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: addq %r30, %rax, %rcx
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rdi
; EGPR-NDD-NEXT: movq %r28, %rax
; EGPR-NDD-NEXT: mulq %r18
; EGPR-NDD-NEXT: addq %rax, %rcx
-; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: adcq %rdx, %rdi
; EGPR-NDD-NEXT: setb %r9b
; EGPR-NDD-NEXT: movq %r29, %rax
; EGPR-NDD-NEXT: mulq %r18
-; EGPR-NDD-NEXT: addq %r8, %rax
-; EGPR-NDD-NEXT: movzbl %r9b, %r8d
-; EGPR-NDD-NEXT: adcq %r8, %rdx
+; EGPR-NDD-NEXT: addq %rdi, %rax
+; EGPR-NDD-NEXT: movzbl %r9b, %edi
+; EGPR-NDD-NEXT: adcq %rdi, %rdx
; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: adcq %rdi, %rdx, %r29
+; EGPR-NDD-NEXT: adcq %rdx, %r8
; EGPR-NDD-NEXT: movq 112(%r20), %rdi
; EGPR-NDD-NEXT: movq %r22, %rax
; EGPR-NDD-NEXT: mulq %rdi
; EGPR-NDD-NEXT: movq %rax, %r26
-; EGPR-NDD-NEXT: imulq %rdi, %r21, %rax
-; EGPR-NDD-NEXT: addq %rdx, %rax
-; EGPR-NDD-NEXT: imulq 120(%r20), %r22, %rdx
-; EGPR-NDD-NEXT: addq %rdx, %rax, %r8
+; EGPR-NDD-NEXT: imulq %r21, %rdi
+; EGPR-NDD-NEXT: addq %rdi, %rdx
+; EGPR-NDD-NEXT: imulq 120(%r20), %r22, %rax
+; EGPR-NDD-NEXT: addq %rax, %rdx, %r9
; EGPR-NDD-NEXT: movq 96(%r20), %r28
; EGPR-NDD-NEXT: movq 104(%r20), %rdi
-; EGPR-NDD-NEXT: imulq %rdi, %r24, %r9
+; EGPR-NDD-NEXT: imulq %rdi, %r24, %r10
; EGPR-NDD-NEXT: movq %r24, %rax
; EGPR-NDD-NEXT: mulq %r28
-; EGPR-NDD-NEXT: addq %r9, %rdx
-; EGPR-NDD-NEXT: imulq %r28, %r23, %r9
-; EGPR-NDD-NEXT: addq %r9, %rdx
-; EGPR-NDD-NEXT: addq %r26, %rax, %r9
-; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: addq %r10, %rdx
+; EGPR-NDD-NEXT: imulq %r28, %r14, %r23
+; EGPR-NDD-NEXT: addq %r23, %rdx
+; EGPR-NDD-NEXT: addq %rax, %r26
+; EGPR-NDD-NEXT: adcq %rdx, %r9
; EGPR-NDD-NEXT: movq %r28, %rax
; EGPR-NDD-NEXT: mulq %r22
; EGPR-NDD-NEXT: movq %rdx, %r23
; EGPR-NDD-NEXT: movq %rax, %r24
; EGPR-NDD-NEXT: movq %rdi, %rax
; EGPR-NDD-NEXT: mulq %r22
-; EGPR-NDD-NEXT: addq %r23, %rax, %r10
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r11
+; EGPR-NDD-NEXT: addq %rax, %r23
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r10
; EGPR-NDD-NEXT: movq %r28, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %rax, %r10
-; EGPR-NDD-NEXT: adcq %rdx, %r11
-; EGPR-NDD-NEXT: setb %r16b
+; EGPR-NDD-NEXT: addq %rax, %r23
+; EGPR-NDD-NEXT: adcq %rdx, %r10
+; EGPR-NDD-NEXT: setb %r11b
; EGPR-NDD-NEXT: movq %rdi, %rax
; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %r11, %rax
-; EGPR-NDD-NEXT: movzbl %r16b, %edi
+; EGPR-NDD-NEXT: addq %r10, %rax
+; EGPR-NDD-NEXT: movzbl %r11b, %edi
; EGPR-NDD-NEXT: adcq %rdi, %rdx
-; EGPR-NDD-NEXT: addq %r9, %rax
-; EGPR-NDD-NEXT: adcq %r8, %rdx
-; EGPR-NDD-NEXT: addq %r27, %r24, %rdi
-; EGPR-NDD-NEXT: adcq %r10, %rcx
+; EGPR-NDD-NEXT: addq %r26, %rax
+; EGPR-NDD-NEXT: adcq %r9, %rdx
+; EGPR-NDD-NEXT: addq %r27, %r24
+; EGPR-NDD-NEXT: adcq %r23, %rcx
; EGPR-NDD-NEXT: adcq %rsi, %rax
-; EGPR-NDD-NEXT: adcq %r29, %rdx
-; EGPR-NDD-NEXT: addq %rdi, %r25, %r15
-; EGPR-NDD-NEXT: adcq %rcx, %r19, %rbx
-; EGPR-NDD-NEXT: adcq %rax, %r31, %rbp
-; EGPR-NDD-NEXT: adcq %rdx, %r12, %r30
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r18 # 8-byte Reload
-; EGPR-NDD-NEXT: movq 80(%r18), %r22
+; EGPR-NDD-NEXT: adcq %r8, %rdx
+; EGPR-NDD-NEXT: addq %r24, %r25, %rbx
+; EGPR-NDD-NEXT: adcq %rcx, %r12
+; EGPR-NDD-NEXT: adcq %rax, %r19, %r13
+; EGPR-NDD-NEXT: adcq %rdx, %r31, %r30
+; EGPR-NDD-NEXT: movq 80(%r15), %r22
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r21 # 8-byte Reload
-; EGPR-NDD-NEXT: mulq %r21
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
+; EGPR-NDD-NEXT: mulq %r16
; EGPR-NDD-NEXT: movq %rax, %r26
; EGPR-NDD-NEXT: movq %rdx, %rdi
-; EGPR-NDD-NEXT: movq 88(%r18), %r20
+; EGPR-NDD-NEXT: movq 88(%r15), %r20
; EGPR-NDD-NEXT: movq %r20, %rax
-; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %rdi, %rax, %rcx
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rsi
+; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: addq %rax, %rdi
+; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; EGPR-NDD-NEXT: mulq %r12
-; EGPR-NDD-NEXT: addq %rax, %rcx
-; EGPR-NDD-NEXT: adcq %rdx, %rsi
-; EGPR-NDD-NEXT: setb %dil
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r21 # 8-byte Reload
+; EGPR-NDD-NEXT: mulq %r21
+; EGPR-NDD-NEXT: addq %rax, %rdi
+; EGPR-NDD-NEXT: adcq %rdx, %rcx
+; EGPR-NDD-NEXT: setb %sil
; EGPR-NDD-NEXT: movq %r20, %rax
-; EGPR-NDD-NEXT: mulq %r12
-; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: movzbl %dil, %eax
-; EGPR-NDD-NEXT: adcq %rax, %rdx, %rdi
-; EGPR-NDD-NEXT: movq 64(%r18), %r24
-; EGPR-NDD-NEXT: movq %r24, %rax
; EGPR-NDD-NEXT: mulq %r21
+; EGPR-NDD-NEXT: addq %rax, %rcx
+; EGPR-NDD-NEXT: movzbl %sil, %eax
+; EGPR-NDD-NEXT: adcq %rax, %rdx, %rsi
+; EGPR-NDD-NEXT: movq 64(%r15), %r24
+; EGPR-NDD-NEXT: movq %r24, %rax
+; EGPR-NDD-NEXT: mulq %r16
; EGPR-NDD-NEXT: movq %rax, %r29
; EGPR-NDD-NEXT: movq %rdx, %r27
-; EGPR-NDD-NEXT: movq 72(%r18), %r23
+; EGPR-NDD-NEXT: movq 72(%r15), %r23
; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r21
-; EGPR-NDD-NEXT: addq %r27, %rax, %r8
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
+; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: addq %rax, %r27
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: mulq %r12
-; EGPR-NDD-NEXT: addq %r8, %rax, %r31
-; EGPR-NDD-NEXT: adcq %rdx, %r9, %r8
+; EGPR-NDD-NEXT: mulq %r21
+; EGPR-NDD-NEXT: addq %r27, %rax, %r31
+; EGPR-NDD-NEXT: adcq %rdx, %r8
; EGPR-NDD-NEXT: setb %r9b
; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r12
+; EGPR-NDD-NEXT: mulq %r21
; EGPR-NDD-NEXT: addq %r8, %rax
; EGPR-NDD-NEXT: movzbl %r9b, %r8d
; EGPR-NDD-NEXT: adcq %r8, %rdx
-; EGPR-NDD-NEXT: addq %rax, %r26, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %rcx
+; EGPR-NDD-NEXT: addq %rax, %r26, %r28
+; EGPR-NDD-NEXT: adcq %rdx, %rdi
+; EGPR-NDD-NEXT: adcq $0, %rcx
; EGPR-NDD-NEXT: adcq $0, %rsi
-; EGPR-NDD-NEXT: adcq $0, %rdi
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; EGPR-NDD-NEXT: mulq %r10
; EGPR-NDD-NEXT: movq %rdx, %r26
; EGPR-NDD-NEXT: movq %rax, %r27
; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %r26, %rax, %r9
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r10
+; EGPR-NDD-NEXT: mulq %r10
+; EGPR-NDD-NEXT: addq %rax, %r26
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: mulq %r17
-; EGPR-NDD-NEXT: addq %rax, %r9
-; EGPR-NDD-NEXT: adcq %rdx, %r10
-; EGPR-NDD-NEXT: setb %r11b
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; EGPR-NDD-NEXT: mulq %r11
+; EGPR-NDD-NEXT: addq %r26, %rax, %r25
+; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: setb %r9b
; EGPR-NDD-NEXT: movq %r23, %rax
-; EGPR-NDD-NEXT: mulq %r17
-; EGPR-NDD-NEXT: addq %r10, %rax
-; EGPR-NDD-NEXT: movzbl %r11b, %r10d
-; EGPR-NDD-NEXT: adcq %r10, %rdx
-; EGPR-NDD-NEXT: addq %r8, %r27, %r28
-; EGPR-NDD-NEXT: adcq %rcx, %r9, %r25
+; EGPR-NDD-NEXT: mulq %r11
+; EGPR-NDD-NEXT: addq %r8, %rax
+; EGPR-NDD-NEXT: movzbl %r9b, %r8d
+; EGPR-NDD-NEXT: adcq %r8, %rdx
+; EGPR-NDD-NEXT: addq %r27, %r28
+; EGPR-NDD-NEXT: adcq %rdi, %r25
; EGPR-NDD-NEXT: adcq $0, %rax
-; EGPR-NDD-NEXT: adcq $0, %rdx, %rcx
-; EGPR-NDD-NEXT: addq %rax, %rsi
-; EGPR-NDD-NEXT: adcq %rdi, %rcx
+; EGPR-NDD-NEXT: adcq $0, %rdx
+; EGPR-NDD-NEXT: addq %rax, %rcx
+; EGPR-NDD-NEXT: adcq %rdx, %rsi
; EGPR-NDD-NEXT: setb %dil
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: mulq %r10
; EGPR-NDD-NEXT: movq %rdx, %r26
; EGPR-NDD-NEXT: movq %rax, %r27
; EGPR-NDD-NEXT: movq %r20, %rax
-; EGPR-NDD-NEXT: mulq %r16
-; EGPR-NDD-NEXT: addq %r26, %rax, %r8
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r9
+; EGPR-NDD-NEXT: mulq %r10
+; EGPR-NDD-NEXT: addq %rax, %r26
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r8
; EGPR-NDD-NEXT: movq %r22, %rax
-; EGPR-NDD-NEXT: mulq %r17
-; EGPR-NDD-NEXT: addq %rax, %r8
-; EGPR-NDD-NEXT: adcq %rdx, %r9
-; EGPR-NDD-NEXT: setb %r10b
+; EGPR-NDD-NEXT: mulq %r11
+; EGPR-NDD-NEXT: addq %r26, %rax, %r19
+; EGPR-NDD-NEXT: adcq %rdx, %r8
+; EGPR-NDD-NEXT: setb %r9b
; EGPR-NDD-NEXT: movq %r20, %rax
-; EGPR-NDD-NEXT: mulq %r17
-; EGPR-NDD-NEXT: addq %r9, %rax
-; EGPR-NDD-NEXT: movzbl %r10b, %r9d
-; EGPR-NDD-NEXT: adcq %r9, %rdx
-; EGPR-NDD-NEXT: addq %rsi, %r27
-; EGPR-NDD-NEXT: adcq %rcx, %r8, %r19
+; EGPR-NDD-NEXT: mulq %r11
+; EGPR-NDD-NEXT: addq %r8, %rax
+; EGPR-NDD-NEXT: movzbl %r9b, %r8d
+; EGPR-NDD-NEXT: adcq %r8, %rdx
+; EGPR-NDD-NEXT: addq %rcx, %r27
+; EGPR-NDD-NEXT: adcq %rsi, %r19
; EGPR-NDD-NEXT: movzbl %dil, %ecx
; EGPR-NDD-NEXT: adcq %rax, %rcx
; EGPR-NDD-NEXT: adcq $0, %rdx, %rdi
-; EGPR-NDD-NEXT: movq %r18, %r9
-; EGPR-NDD-NEXT: movq 96(%r18), %r26
-; EGPR-NDD-NEXT: imulq %r17, %r26, %rsi
+; EGPR-NDD-NEXT: movq 96(%r15), %r26
+; EGPR-NDD-NEXT: imulq %r11, %r26, %rsi
; EGPR-NDD-NEXT: movq %r26, %rax
-; EGPR-NDD-NEXT: mulq %r16
+; EGPR-NDD-NEXT: mulq %r10
; EGPR-NDD-NEXT: movq %rax, %r18
-; EGPR-NDD-NEXT: addq %rsi, %rdx, %rax
-; EGPR-NDD-NEXT: movq 104(%r9), %r8
-; EGPR-NDD-NEXT: imulq %r16, %r8, %rdx
-; EGPR-NDD-NEXT: addq %rdx, %rax, %rsi
-; EGPR-NDD-NEXT: movq 112(%r9), %rax
-; EGPR-NDD-NEXT: movq %r9, %r11
-; EGPR-NDD-NEXT: imulq %r12, %rax, %r9
-; EGPR-NDD-NEXT: mulq %r21
+; EGPR-NDD-NEXT: addq %rsi, %rdx
+; EGPR-NDD-NEXT: movq 104(%r15), %r8
+; EGPR-NDD-NEXT: imulq %r10, %r8, %rax
+; EGPR-NDD-NEXT: addq %rax, %rdx, %rsi
+; EGPR-NDD-NEXT: movq 112(%r15), %rax
+; EGPR-NDD-NEXT: imulq %r21, %rax, %r9
+; EGPR-NDD-NEXT: mulq %r16
; EGPR-NDD-NEXT: addq %r9, %rdx
-; EGPR-NDD-NEXT: imulq 120(%r11), %r21, %r9
+; EGPR-NDD-NEXT: imulq 120(%r15), %r16, %r9
; EGPR-NDD-NEXT: addq %r9, %rdx
-; EGPR-NDD-NEXT: addq %r18, %rax, %r9
-; EGPR-NDD-NEXT: adcq %rsi, %rdx, %r16
-; EGPR-NDD-NEXT: movq %r21, %rax
+; EGPR-NDD-NEXT: addq %r18, %rax, %r10
+; EGPR-NDD-NEXT: adcq %rsi, %rdx, %r9
+; EGPR-NDD-NEXT: movq %r16, %rax
+; EGPR-NDD-NEXT: movq %r16, %r18
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: movq %rdx, %r17
; EGPR-NDD-NEXT: movq %rax, %rsi
-; EGPR-NDD-NEXT: movq %r12, %rax
-; EGPR-NDD-NEXT: mulq %r26
-; EGPR-NDD-NEXT: addq %r17, %rax, %r10
-; EGPR-NDD-NEXT: adcq $0, %rdx, %r17
; EGPR-NDD-NEXT: movq %r21, %rax
+; EGPR-NDD-NEXT: mulq %r26
+; EGPR-NDD-NEXT: addq %r17, %rax, %r11
+; EGPR-NDD-NEXT: adcq $0, %rdx, %r16
+; EGPR-NDD-NEXT: movq %r18, %rax
; EGPR-NDD-NEXT: mulq %r8
-; EGPR-NDD-NEXT: addq %r10, %rax, %r11
-; EGPR-NDD-NEXT: adcq %rdx, %r17, %r10
+; EGPR-NDD-NEXT: addq %rax, %r11
+; EGPR-NDD-NEXT: adcq %rdx, %r16
; EGPR-NDD-NEXT: setb %r17b
-; EGPR-NDD-NEXT: movq %r12, %rax
+; EGPR-NDD-NEXT: movq %r21, %rax
; EGPR-NDD-NEXT: mulq %r8
-; EGPR-NDD-NEXT: addq %r10, %rax
+; EGPR-NDD-NEXT: addq %r16, %rax
; EGPR-NDD-NEXT: movzbl %r17b, %r8d
; EGPR-NDD-NEXT: adcq %r8, %rdx
-; EGPR-NDD-NEXT: addq %r9, %rax, %r10
-; EGPR-NDD-NEXT: adcq %r16, %rdx, %r17
-; EGPR-NDD-NEXT: imulq %r14, %r24, %r8
+; EGPR-NDD-NEXT: addq %rax, %r10
+; EGPR-NDD-NEXT: adcq %r9, %rdx, %r17
+; EGPR-NDD-NEXT: imulq {{[-0-9]+}}(%r{{[sb]}}p), %r24, %r8 # 8-byte Folded Reload
; EGPR-NDD-NEXT: movq %r24, %rax
-; EGPR-NDD-NEXT: mulq %r13
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
+; EGPR-NDD-NEXT: mulq %r16
; EGPR-NDD-NEXT: movq %rax, %r9
-; EGPR-NDD-NEXT: addq %r8, %rdx, %rax
-; EGPR-NDD-NEXT: imulq %r13, %r23, %rdx
-; EGPR-NDD-NEXT: addq %rdx, %rax, %r8
+; EGPR-NDD-NEXT: addq %r8, %rdx
+; EGPR-NDD-NEXT: imulq %r16, %r23, %rax
+; EGPR-NDD-NEXT: addq %rax, %rdx, %r8
; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r21 # 8-byte Reload
; EGPR-NDD-NEXT: imulq %r21, %r22, %r16
; EGPR-NDD-NEXT: movq %r22, %rax
; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r26 # 8-byte Reload
; EGPR-NDD-NEXT: mulq %r26
; EGPR-NDD-NEXT: addq %r16, %rdx
-; EGPR-NDD-NEXT: imulq %r26, %r20, %r16
-; EGPR-NDD-NEXT: addq %r16, %rdx
+; EGPR-NDD-NEXT: imulq %r26, %r20
+; EGPR-NDD-NEXT: addq %r20, %rdx
; EGPR-NDD-NEXT: addq %r9, %rax, %r16
; EGPR-NDD-NEXT: adcq %r8, %rdx, %r18
; EGPR-NDD-NEXT: movq %r26, %rax
@@ -1840,49 +1832,49 @@ define void @test_1024(ptr %a, ptr %b, ptr %out) nounwind {
; EGPR-NDD-NEXT: addq %r27, %rsi
; EGPR-NDD-NEXT: adcq %r19, %r8
; EGPR-NDD-NEXT: adcq %rcx, %rax
-; EGPR-NDD-NEXT: adcq %rdx, %rdi, %rcx
-; EGPR-NDD-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r29, %rdx # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r31, %rdi # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r28, %r9 # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r25, %r10 # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %r15, %rsi
-; EGPR-NDD-NEXT: adcq %rbx, %r8
-; EGPR-NDD-NEXT: adcq %rbp, %rax
-; EGPR-NDD-NEXT: adcq %r30, %rcx
-; EGPR-NDD-NEXT: addq %rdx, {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %rdi, {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %r9, {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %r10, {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %rsi, {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %r8, (%rsp), %r8 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %rdi, %rdx
+; EGPR-NDD-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r29 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r31 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r28 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %rbp, %r25
+; EGPR-NDD-NEXT: adcq %rbx, %rsi
+; EGPR-NDD-NEXT: adcq %r12, %r8
+; EGPR-NDD-NEXT: adcq %r13, %rax
+; EGPR-NDD-NEXT: adcq %r30, %rdx
+; EGPR-NDD-NEXT: addq %r29, {{[-0-9]+}}(%r{{[sb]}}p), %r29 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %r31, {{[-0-9]+}}(%r{{[sb]}}p), %r31 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %r28, {{[-0-9]+}}(%r{{[sb]}}p), %r28 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %r25, {{[-0-9]+}}(%r{{[sb]}}p), %r25 # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %rsi, (%rsp), %rsi # 8-byte Folded Reload
+; EGPR-NDD-NEXT: adcq %r8, {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
; EGPR-NDD-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; EGPR-NDD-NEXT: adcq %rcx, {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, (%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 8(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 16(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 24(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 32(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 40(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 48(%r11)
-; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r16 # 8-byte Reload
-; EGPR-NDD-NEXT: movq %r16, 56(%r11)
-; EGPR-NDD-NEXT: movq %rdx, 64(%r11)
-; EGPR-NDD-NEXT: movq %rdi, 72(%r11)
-; EGPR-NDD-NEXT: movq %r9, 80(%r11)
-; EGPR-NDD-NEXT: movq %r10, 88(%r11)
-; EGPR-NDD-NEXT: movq %rsi, 96(%r11)
-; EGPR-NDD-NEXT: movq %r8, 104(%r11)
-; EGPR-NDD-NEXT: movq %rax, 112(%r11)
-; EGPR-NDD-NEXT: movq %rcx, 120(%r11)
-; EGPR-NDD-NEXT: addq $104, %rsp
+; EGPR-NDD-NEXT: adcq %rdx, {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, (%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 8(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 16(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 24(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 32(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 40(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 48(%rcx)
+; EGPR-NDD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; EGPR-NDD-NEXT: movq %rdi, 56(%rcx)
+; EGPR-NDD-NEXT: movq %r29, 64(%rcx)
+; EGPR-NDD-NEXT: movq %r31, 72(%rcx)
+; EGPR-NDD-NEXT: movq %r28, 80(%rcx)
+; EGPR-NDD-NEXT: movq %r25, 88(%rcx)
+; EGPR-NDD-NEXT: movq %rsi, 96(%rcx)
+; EGPR-NDD-NEXT: movq %r8, 104(%rcx)
+; EGPR-NDD-NEXT: movq %rax, 112(%rcx)
+; EGPR-NDD-NEXT: movq %rdx, 120(%rcx)
+; EGPR-NDD-NEXT: addq $96, %rsp
; EGPR-NDD-NEXT: popq %rbx
; EGPR-NDD-NEXT: popq %r12
; EGPR-NDD-NEXT: popq %r13
diff --git a/llvm/test/CodeGen/X86/apx/or.ll b/llvm/test/CodeGen/X86/apx/or.ll
index 6a3db29..e51ba9d 100644
--- a/llvm/test/CodeGen/X86/apx/or.ll
+++ b/llvm/test/CodeGen/X86/apx/or.ll
@@ -478,17 +478,17 @@ define i1 @orflag16rr(i16 %a, i16 %b) {
define i1 @orflag32rr(i32 %a, i32 %b) {
; CHECK-LABEL: orflag32rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: orl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x09,0xf7]
+; CHECK-NEXT: orl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x09,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag32rr:
; NF: # %bb.0:
-; NF-NEXT: orl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x09,0xf7]
+; NF-NEXT: orl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x09,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i32 %a, %b ; 0xff << 50
@@ -500,17 +500,17 @@ define i1 @orflag32rr(i32 %a, i32 %b) {
define i1 @orflag64rr(i64 %a, i64 %b) {
; CHECK-LABEL: orflag64rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: orq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x09,0xf7]
+; CHECK-NEXT: orq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x09,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag64rr:
; NF: # %bb.0:
-; NF-NEXT: orq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x09,0xf7]
+; NF-NEXT: orq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x09,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i64 %a, %b ; 0xff << 50
@@ -574,17 +574,17 @@ define i1 @orflag16rm(ptr %ptr, i16 %b) {
define i1 @orflag32rm(ptr %ptr, i32 %b) {
; CHECK-LABEL: orflag32rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: orl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x0b,0x37]
+; CHECK-NEXT: orl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x0b,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag32rm:
; NF: # %bb.0:
-; NF-NEXT: orl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x0b,0x37]
+; NF-NEXT: orl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x0b,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i32, ptr %ptr
@@ -597,17 +597,17 @@ define i1 @orflag32rm(ptr %ptr, i32 %b) {
define i1 @orflag64rm(ptr %ptr, i64 %b) {
; CHECK-LABEL: orflag64rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: orq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x0b,0x37]
+; CHECK-NEXT: orq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x0b,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag64rm:
; NF: # %bb.0:
-; NF-NEXT: orq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x0b,0x37]
+; NF-NEXT: orq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x0b,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i64, ptr %ptr
@@ -668,19 +668,19 @@ define i1 @orflag16ri(i16 %a) {
define i1 @orflag32ri(i32 %a) {
; CHECK-LABEL: orflag32ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: orl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xcf,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: orl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xcf,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag32ri:
; NF: # %bb.0:
-; NF-NEXT: orl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xcf,0x40,0xe2,0x01,0x00]
+; NF-NEXT: orl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xcf,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i32 %a, 123456 ; 0xff << 50
@@ -692,19 +692,19 @@ define i1 @orflag32ri(i32 %a) {
define i1 @orflag64ri(i64 %a) {
; CHECK-LABEL: orflag64ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: orq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xcf,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: orq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xcf,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag64ri:
; NF: # %bb.0:
-; NF-NEXT: orq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xcf,0x40,0xe2,0x01,0x00]
+; NF-NEXT: orq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xcf,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i64 %a, 123456 ; 0xff << 50
@@ -739,17 +739,17 @@ define i1 @orflag16ri8(i16 %a) {
define i1 @orflag32ri8(i32 %a) {
; CHECK-LABEL: orflag32ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: orl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xcf,0x7b]
+; CHECK-NEXT: orl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xcf,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag32ri8:
; NF: # %bb.0:
-; NF-NEXT: orl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xcf,0x7b]
+; NF-NEXT: orl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xcf,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i32 %a, 123 ; 0xff << 50
@@ -761,17 +761,17 @@ define i1 @orflag32ri8(i32 %a) {
define i1 @orflag64ri8(i64 %a) {
; CHECK-LABEL: orflag64ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: orq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xcf,0x7b]
+; CHECK-NEXT: orq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xcf,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: orflag64ri8:
; NF: # %bb.0:
-; NF-NEXT: orq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xcf,0x7b]
+; NF-NEXT: orq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xcf,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = or i64 %a, 123 ; 0xff << 50
diff --git a/llvm/test/CodeGen/X86/apx/push2-pop2-vector-register.ll b/llvm/test/CodeGen/X86/apx/push2-pop2-vector-register.ll
index aa5c54d..f20c4c1 100644
--- a/llvm/test/CodeGen/X86/apx/push2-pop2-vector-register.ll
+++ b/llvm/test/CodeGen/X86/apx/push2-pop2-vector-register.ll
@@ -43,8 +43,12 @@ define void @widget(float %arg) nounwind {
; FRAME-NEXT: xorl %r8d, %r8d
; FRAME-NEXT: callq *%rsi
; FRAME-NEXT: movss %xmm6, 0
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; FRAME-NEXT: addq $48, %rsp
; FRAME-NEXT: pop2 %r15, %rsi
diff --git a/llvm/test/CodeGen/X86/apx/push2-pop2.ll b/llvm/test/CodeGen/X86/apx/push2-pop2.ll
index 25139f1..6bd9f52 100644
--- a/llvm/test/CodeGen/X86/apx/push2-pop2.ll
+++ b/llvm/test/CodeGen/X86/apx/push2-pop2.ll
@@ -24,8 +24,12 @@ define void @csr1() nounwind {
; FRAME: # %bb.0: # %entry
; FRAME-NEXT: pushq %rbp
; FRAME-NEXT: movq %rsp, %rbp
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: popq %rbp
; FRAME-NEXT: retq
entry:
@@ -59,8 +63,12 @@ define void @csr2() nounwind {
; FRAME-NEXT: pushq %rbp
; FRAME-NEXT: movq %rsp, %rbp
; FRAME-NEXT: pushq %r15
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: popq %r15
; FRAME-NEXT: popq %rbp
; FRAME-NEXT: retq
@@ -95,8 +103,12 @@ define void @csr3() nounwind {
; FRAME-NEXT: pushq %rbp
; FRAME-NEXT: movq %rsp, %rbp
; FRAME-NEXT: push2 %r14, %r15
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: pop2 %r15, %r14
; FRAME-NEXT: popq %rbp
; FRAME-NEXT: retq
@@ -136,8 +148,12 @@ define void @csr4() nounwind {
; FRAME-NEXT: movq %rsp, %rbp
; FRAME-NEXT: push2 %r14, %r15
; FRAME-NEXT: pushq %r13
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: popq %r13
; FRAME-NEXT: pop2 %r15, %r14
; FRAME-NEXT: popq %rbp
@@ -178,8 +194,12 @@ define void @csr5() nounwind {
; FRAME-NEXT: movq %rsp, %rbp
; FRAME-NEXT: push2 %r14, %r15
; FRAME-NEXT: push2 %r12, %r13
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: pop2 %r13, %r12
; FRAME-NEXT: pop2 %r15, %r14
; FRAME-NEXT: popq %rbp
@@ -225,8 +245,12 @@ define void @csr6() nounwind {
; FRAME-NEXT: push2 %r14, %r15
; FRAME-NEXT: push2 %r12, %r13
; FRAME-NEXT: pushq %rbx
+; FRAME-NEXT: pushq %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popq %rbp
; FRAME-NEXT: popq %rbx
; FRAME-NEXT: pop2 %r13, %r12
; FRAME-NEXT: pop2 %r15, %r14
diff --git a/llvm/test/CodeGen/X86/apx/pushp-popp.ll b/llvm/test/CodeGen/X86/apx/pushp-popp.ll
index ad4306f..625e70b 100644
--- a/llvm/test/CodeGen/X86/apx/pushp-popp.ll
+++ b/llvm/test/CodeGen/X86/apx/pushp-popp.ll
@@ -18,8 +18,12 @@ define void @csr2() nounwind {
; FRAME-NEXT: pushp %rbp
; FRAME-NEXT: movq %rsp, %rbp
; FRAME-NEXT: pushp %r15
+; FRAME-NEXT: pushp %rbp
+; FRAME-NEXT: pushq %rax
; FRAME-NEXT: #APP
; FRAME-NEXT: #NO_APP
+; FRAME-NEXT: popq %rax
+; FRAME-NEXT: popp %rbp
; FRAME-NEXT: popp %r15
; FRAME-NEXT: popp %rbp
; FRAME-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/apx/shift-eflags.ll b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
index 5da5090..2659f80 100644
--- a/llvm/test/CodeGen/X86/apx/shift-eflags.ll
+++ b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
@@ -7,7 +7,7 @@
define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const:
; CHECK: # %bb.0:
-; CHECK-NEXT: sarl $14, %edi, %eax
+; CHECK-NEXT: sarl $14, %edi
; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 14
@@ -85,7 +85,7 @@ define i32 @shl_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: ashr_const1:
; CHECK: # %bb.0:
-; CHECK-NEXT: sarl %edi, %eax
+; CHECK-NEXT: sarl %edi
; CHECK-NEXT: cmovel %edx, %ecx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, 1
@@ -166,8 +166,8 @@ define i32 @ashr_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: sarl %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%s = ashr i32 %a0, %a1
@@ -183,8 +183,8 @@ define i32 @lshr_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: shrl %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%s = lshr i32 %a0, %a1
@@ -200,8 +200,8 @@ define i32 @shl_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: shll %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%s = shl i32 %a0, %a1
@@ -264,8 +264,8 @@ define i32 @ashr_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK: # %bb.0:
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: orb $1, %sil, %cl
-; CHECK-NEXT: sarl %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: sarl %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%a = or i32 %a1, 1
@@ -281,8 +281,8 @@ define i32 @lshr_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK: # %bb.0:
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: orb $1, %sil, %cl
-; CHECK-NEXT: shrl %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: shrl %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%a = or i32 %a1, 1
@@ -298,8 +298,8 @@ define i32 @shl_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK: # %bb.0:
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: orb $1, %sil, %cl
-; CHECK-NEXT: shll %cl, %edi, %ecx
-; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: shll %cl, %edi
+; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: retq
%a = or i32 %a1, 1
diff --git a/llvm/test/CodeGen/X86/apx/sub.ll b/llvm/test/CodeGen/X86/apx/sub.ll
index 75d7055..9519fab 100644
--- a/llvm/test/CodeGen/X86/apx/sub.ll
+++ b/llvm/test/CodeGen/X86/apx/sub.ll
@@ -451,16 +451,16 @@ define i16 @subflag16rr(i16 noundef %a, i16 noundef %b) {
; CHECK-LABEL: subflag16rr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subw %si, %di # EVEX TO LEGACY Compression encoding: [0x66,0x29,0xf7]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag16rr:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subw %si, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x29,0xf7]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subw %si, %di # EVEX TO LEGACY Compression encoding: [0x66,0x29,0xf7]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: # kill: def $ax killed $ax killed $eax
; NF-NEXT: retq # encoding: [0xc3]
entry:
@@ -472,15 +472,15 @@ define i32 @subflag32rr(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: subflag32rr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subl %esi, %edi # EVEX TO LEGACY Compression encoding: [0x29,0xf7]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag32rr:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x29,0xf7]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subl %esi, %edi # EVEX TO LEGACY Compression encoding: [0x29,0xf7]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 %b)
@@ -491,15 +491,15 @@ define i64 @subflag64rr(i64 noundef %a, i64 noundef %b) {
; CHECK-LABEL: subflag64rr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x29,0xf7]
-; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: subq %rsi, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x29,0xf7]
+; CHECK-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag64rr:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x29,0xf7]
-; NF-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; NF-NEXT: subq %rsi, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x29,0xf7]
+; NF-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 %b)
@@ -534,16 +534,16 @@ define i16 @subflag16rm(i16 noundef %a, ptr %b) {
; CHECK-LABEL: subflag16rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subw (%rsi), %di # EVEX TO LEGACY Compression encoding: [0x66,0x2b,0x3e]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag16rm:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subw (%rsi), %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x2b,0x3e]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subw (%rsi), %di # EVEX TO LEGACY Compression encoding: [0x66,0x2b,0x3e]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: # kill: def $ax killed $ax killed $eax
; NF-NEXT: retq # encoding: [0xc3]
entry:
@@ -556,15 +556,15 @@ define i32 @subflag32rm(i32 noundef %a, ptr %b) {
; CHECK-LABEL: subflag32rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subl (%rsi), %edi # EVEX TO LEGACY Compression encoding: [0x2b,0x3e]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag32rm:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subl (%rsi), %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x2b,0x3e]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subl (%rsi), %edi # EVEX TO LEGACY Compression encoding: [0x2b,0x3e]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%t = load i32, ptr %b
@@ -576,15 +576,15 @@ define i64 @subflag64rm(i64 noundef %a, ptr %b) {
; CHECK-LABEL: subflag64rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x2b,0x3e]
-; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: subq (%rsi), %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x2b,0x3e]
+; CHECK-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag64rm:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subq (%rsi), %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x2b,0x3e]
-; NF-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; NF-NEXT: subq (%rsi), %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x2b,0x3e]
+; NF-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%t = load i64, ptr %b
@@ -596,16 +596,16 @@ define i16 @subflag16ri8(i16 noundef %a) {
; CHECK-LABEL: subflag16ri8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subw $123, %di # EVEX TO LEGACY Compression encoding: [0x66,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag16ri8:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subw $123, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xef,0x7b]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subw $123, %di # EVEX TO LEGACY Compression encoding: [0x66,0x83,0xef,0x7b]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: # kill: def $ax killed $ax killed $eax
; NF-NEXT: retq # encoding: [0xc3]
entry:
@@ -617,15 +617,15 @@ define i32 @subflag32ri8(i32 noundef %a) {
; CHECK-LABEL: subflag32ri8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: subl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xef,0x7b]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag32ri8:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xef,0x7b]
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: subl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xef,0x7b]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123)
@@ -636,15 +636,15 @@ define i64 @subflag64ri8(i64 noundef %a) {
; CHECK-LABEL: subflag64ri8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xef,0x7b]
-; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: subq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xef,0x7b]
+; CHECK-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag64ri8:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xef,0x7b]
-; NF-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; NF-NEXT: subq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xef,0x7b]
+; NF-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123)
@@ -678,18 +678,18 @@ define i16 @subflag16ri(i16 noundef %a) {
; CHECK-LABEL: subflag16ri:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xef,0xd2,0x04]
+; CHECK-NEXT: subw $1234, %di # EVEX TO LEGACY Compression encoding: [0x66,0x81,0xef,0xd2,0x04]
; CHECK-NEXT: # imm = 0x4D2
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag16ri:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subw $1234, %di, %cx # encoding: [0x62,0xf4,0x75,0x18,0x81,0xef,0xd2,0x04]
+; NF-NEXT: subw $1234, %di # EVEX TO LEGACY Compression encoding: [0x66,0x81,0xef,0xd2,0x04]
; NF-NEXT: # imm = 0x4D2
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: # kill: def $ax killed $ax killed $eax
; NF-NEXT: retq # encoding: [0xc3]
entry:
@@ -701,17 +701,17 @@ define i32 @subflag32ri(i32 noundef %a) {
; CHECK-LABEL: subflag32ri:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: subl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag32ri:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; NF-NEXT: subl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xef,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
-; NF-NEXT: cmovael %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc1]
+; NF-NEXT: cmovael %edi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i32 @llvm.usub.sat.i32(i32 %a, i32 123456)
@@ -722,17 +722,17 @@ define i64 @subflag64ri(i64 noundef %a) {
; CHECK-LABEL: subflag64ri:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK-NEXT: subq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: subq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xef,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
-; CHECK-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; CHECK-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: subflag64ri:
; NF: # %bb.0: # %entry
; NF-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NF-NEXT: subq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xef,0x40,0xe2,0x01,0x00]
+; NF-NEXT: subq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xef,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
-; NF-NEXT: cmovaeq %rcx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc1]
+; NF-NEXT: cmovaeq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x43,0xc7]
; NF-NEXT: retq # encoding: [0xc3]
entry:
%sub = call i64 @llvm.usub.sat.i64(i64 %a, i64 123456)
diff --git a/llvm/test/CodeGen/X86/apx/xor.ll b/llvm/test/CodeGen/X86/apx/xor.ll
index 3426f9c..d908849e 100644
--- a/llvm/test/CodeGen/X86/apx/xor.ll
+++ b/llvm/test/CodeGen/X86/apx/xor.ll
@@ -428,8 +428,8 @@ entry:
define i1 @xorflag8rr(i8 %a, i8 %b) {
; CHECK-LABEL: xorflag8rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %edi, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x31,0xfe]
-; CHECK-NEXT: xorb $-1, %al, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xf0,0xff]
+; CHECK-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
+; CHECK-NEXT: xorb $-1, %sil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xf6,0xff]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; CHECK-NEXT: movb %cl, d64(%rip) # encoding: [0x88,0x0d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
@@ -437,8 +437,8 @@ define i1 @xorflag8rr(i8 %a, i8 %b) {
;
; NF-LABEL: xorflag8rr:
; NF: # %bb.0:
-; NF-NEXT: {nf} xorl %edi, %esi, %eax # EVEX TO EVEX Compression encoding: [0x62,0xf4,0x7c,0x1c,0x31,0xfe]
-; NF-NEXT: xorb $-1, %al, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xf0,0xff]
+; NF-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
+; NF-NEXT: xorb $-1, %sil, %cl # encoding: [0x62,0xf4,0x74,0x18,0x80,0xf6,0xff]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NF-NEXT: movb %cl, d64(%rip) # encoding: [0x88,0x0d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
@@ -453,8 +453,8 @@ define i1 @xorflag8rr(i8 %a, i8 %b) {
define i1 @xorflag16rr(i16 %a, i16 %b) {
; CHECK-LABEL: xorflag16rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %edi, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x31,0xfe]
-; CHECK-NEXT: xorw $-1, %ax, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xf0,0xff]
+; CHECK-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
+; CHECK-NEXT: xorw $-1, %si, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xf6,0xff]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; CHECK-NEXT: movw %cx, d64(%rip) # encoding: [0x66,0x89,0x0d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
@@ -462,8 +462,8 @@ define i1 @xorflag16rr(i16 %a, i16 %b) {
;
; NF-LABEL: xorflag16rr:
; NF: # %bb.0:
-; NF-NEXT: {nf} xorl %edi, %esi, %eax # EVEX TO EVEX Compression encoding: [0x62,0xf4,0x7c,0x1c,0x31,0xfe]
-; NF-NEXT: xorw $-1, %ax, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xf0,0xff]
+; NF-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
+; NF-NEXT: xorw $-1, %si, %cx # encoding: [0x62,0xf4,0x75,0x18,0x83,0xf6,0xff]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NF-NEXT: movw %cx, d64(%rip) # encoding: [0x66,0x89,0x0d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
@@ -478,17 +478,17 @@ define i1 @xorflag16rr(i16 %a, i16 %b) {
define i1 @xorflag32rr(i32 %a, i32 %b) {
; CHECK-LABEL: xorflag32rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x31,0xf7]
+; CHECK-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag32rr:
; NF: # %bb.0:
-; NF-NEXT: xorl %esi, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x31,0xf7]
+; NF-NEXT: xorl %edi, %esi # EVEX TO LEGACY Compression encoding: [0x31,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i32 %a, %b ; 0xff << 50
@@ -500,17 +500,17 @@ define i1 @xorflag32rr(i32 %a, i32 %b) {
define i1 @xorflag64rr(i64 %a, i64 %b) {
; CHECK-LABEL: xorflag64rr:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x31,0xf7]
+; CHECK-NEXT: xorq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x31,0xfe]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag64rr:
; NF: # %bb.0:
-; NF-NEXT: xorq %rsi, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x31,0xf7]
+; NF-NEXT: xorq %rdi, %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x31,0xfe]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i64 %a, %b ; 0xff << 50
@@ -574,17 +574,17 @@ define i1 @xorflag16rm(ptr %ptr, i16 %b) {
define i1 @xorflag32rm(ptr %ptr, i32 %b) {
; CHECK-LABEL: xorflag32rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x33,0x37]
+; CHECK-NEXT: xorl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x33,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag32rm:
; NF: # %bb.0:
-; NF-NEXT: xorl (%rdi), %esi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x33,0x37]
+; NF-NEXT: xorl (%rdi), %esi # EVEX TO LEGACY Compression encoding: [0x33,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %esi, d64(%rip) # encoding: [0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i32, ptr %ptr
@@ -597,17 +597,17 @@ define i1 @xorflag32rm(ptr %ptr, i32 %b) {
define i1 @xorflag64rm(ptr %ptr, i64 %b) {
; CHECK-LABEL: xorflag64rm:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x33,0x37]
+; CHECK-NEXT: xorq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x33,0x37]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag64rm:
; NF: # %bb.0:
-; NF-NEXT: xorq (%rdi), %rsi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x33,0x37]
+; NF-NEXT: xorq (%rdi), %rsi # EVEX TO LEGACY Compression encoding: [0x48,0x33,0x37]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rsi, d64(%rip) # encoding: [0x48,0x89,0x35,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%a = load i64, ptr %ptr
@@ -668,19 +668,19 @@ define i1 @xorflag16ri(i16 %a) {
define i1 @xorflag32ri(i32 %a) {
; CHECK-LABEL: xorflag32ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xf7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: xorl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xf7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag32ri:
; NF: # %bb.0:
-; NF-NEXT: xorl $123456, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x81,0xf7,0x40,0xe2,0x01,0x00]
+; NF-NEXT: xorl $123456, %edi # EVEX TO LEGACY Compression encoding: [0x81,0xf7,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i32 %a, 123456 ; 0xff << 50
@@ -692,19 +692,19 @@ define i1 @xorflag32ri(i32 %a) {
define i1 @xorflag64ri(i64 %a) {
; CHECK-LABEL: xorflag64ri:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xf7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: xorq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xf7,0x40,0xe2,0x01,0x00]
; CHECK-NEXT: # imm = 0x1E240
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag64ri:
; NF: # %bb.0:
-; NF-NEXT: xorq $123456, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xf7,0x40,0xe2,0x01,0x00]
+; NF-NEXT: xorq $123456, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xf7,0x40,0xe2,0x01,0x00]
; NF-NEXT: # imm = 0x1E240
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i64 %a, 123456 ; 0xff << 50
@@ -739,17 +739,17 @@ define i1 @xorflag16ri8(i16 %a) {
define i1 @xorflag32ri8(i32 %a) {
; CHECK-LABEL: xorflag32ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xf7,0x7b]
+; CHECK-NEXT: xorl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xf7,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag32ri8:
; NF: # %bb.0:
-; NF-NEXT: xorl $123, %edi, %ecx # encoding: [0x62,0xf4,0x74,0x18,0x83,0xf7,0x7b]
+; NF-NEXT: xorl $123, %edi # EVEX TO LEGACY Compression encoding: [0x83,0xf7,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movl %ecx, d64(%rip) # encoding: [0x89,0x0d,A,A,A,A]
+; NF-NEXT: movl %edi, d64(%rip) # encoding: [0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 2, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i32 %a, 123 ; 0xff << 50
@@ -761,17 +761,17 @@ define i1 @xorflag32ri8(i32 %a) {
define i1 @xorflag64ri8(i64 %a) {
; CHECK-LABEL: xorflag64ri8:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xf7,0x7b]
+; CHECK-NEXT: xorq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xf7,0x7b]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; CHECK-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq # encoding: [0xc3]
;
; NF-LABEL: xorflag64ri8:
; NF: # %bb.0:
-; NF-NEXT: xorq $123, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x83,0xf7,0x7b]
+; NF-NEXT: xorq $123, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x83,0xf7,0x7b]
; NF-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
-; NF-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NF-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NF-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NF-NEXT: retq # encoding: [0xc3]
%v0 = xor i64 %a, 123 ; 0xff << 50
diff --git a/llvm/test/CodeGen/X86/avx10_2_512satcvt-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2_512satcvt-intrinsics.ll
new file mode 100644
index 0000000..1986053
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx10_2_512satcvt-intrinsics.ll
@@ -0,0 +1,1003 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64 --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -verify-machineinstrs -mtriple=i686 --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=CHECK,X86
+
+define dso_local <8 x i64> @test_mm512_ipcvtnebf16_epi8(<32 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7f,0x48,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162ibs512(<32 x bfloat> %__A)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtnebf16_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162ibs512(<32 x bfloat> %__B)
+ %2 = bitcast i32 %__A to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %0
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx10.vcvtnebf162ibs512(<32 x bfloat>)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtnebf16_epi8(i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162ibs512(<32 x bfloat> %__B)
+ %1 = bitcast i32 %__A to <32 x i1>
+ %2 = select <32 x i1> %1, <32 x i16> %0, <32 x i16> zeroinitializer
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtnebf16_epu8(<32 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7f,0x48,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162iubs512(<32 x bfloat> %__A)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtnebf16_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162iubs512(<32 x bfloat> %__B)
+ %2 = bitcast i32 %__A to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %0
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx10.vcvtnebf162iubs512(<32 x bfloat>)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtnebf16_epu8(i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvtnebf162iubs512(<32 x bfloat> %__B)
+ %1 = bitcast i32 %__A to <32 x i1>
+ %2 = select <32 x i1> %1, <32 x i16> %0, <32 x i16> zeroinitializer
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtph_epi8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtph_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 4)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half>, <32 x i16>, i32, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtph_epi8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvt_roundph_epi8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvt_roundph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2ibs {rz-sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x78,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 11)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvt_roundph_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvt_roundph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x79,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvt_roundph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x79,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 11)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvt_roundph_epi8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvt_roundph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xf9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvt_roundph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xf9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2ibs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 11)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtph_epu8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtph_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512( <32 x half> %__B, <32 x i16> %0, i32 %__A, i32 4)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtph_epu8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvt_roundph_epu8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvt_roundph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2iubs {rz-sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x78,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 11)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvt_roundph_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvt_roundph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x79,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvt_roundph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x79,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 11)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half>, <32 x i16>, i32, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvt_roundph_epu8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvt_roundph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xf9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvt_roundph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xf9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvtph2iubs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 11)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtps_epi8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtps_epi8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float>, <16 x i32>, i16, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtps_epi8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvt_roundps_epi8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvt_roundps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2ibs {rz-sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x78,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 11)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvt_roundps_epi8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvt_roundps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x79,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvt_roundps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x79,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 11)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvt_roundps_epi8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvt_roundps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xf9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvt_roundps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xf9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2ibs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 11)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtps_epu8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtps_epu8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtps_epu8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvt_roundps_epu8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvt_roundps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2iubs {rz-sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x78,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 11)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvt_roundps_epu8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvt_roundps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x79,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvt_roundps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs {rz-sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x79,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 11)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float>, <16 x i32>, i16, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvt_roundps_epu8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvt_roundps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xf9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvt_roundps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs {rz-sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xf9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvtps2iubs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 11)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttnebf16_epi8(<32 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7f,0x48,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162ibs512(<32 x bfloat> %__A)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttnebf16_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162ibs512(<32 x bfloat> %__B)
+ %2 = bitcast i32 %__A to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %0
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx10.vcvttnebf162ibs512(<32 x bfloat>)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttnebf16_epi8(i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162ibs512(<32 x bfloat> %__B)
+ %1 = bitcast i32 %__A to <32 x i1>
+ %2 = select <32 x i1> %1, <32 x i16> %0, <32 x i16> zeroinitializer
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttnebf16_epu8(<32 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7f,0x48,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162iubs512(<32 x bfloat> %__A)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttnebf16_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x49,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162iubs512(<32 x bfloat> %__B)
+ %2 = bitcast i32 %__A to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %0
+ %4 = bitcast <32 x i16> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+declare <32 x i16> @llvm.x86.avx10.vcvttnebf162iubs512(<32 x bfloat>)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttnebf16_epu8(i32 noundef %__A, <32 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xc9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.vcvttnebf162iubs512(<32 x bfloat> %__B)
+ %1 = bitcast i32 %__A to <32 x i1>
+ %2 = select <32 x i1> %1, <32 x i16> %0, <32 x i16> zeroinitializer
+ %3 = bitcast <32 x i16> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttph_epi8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttph_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 4)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttph_epi8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtt_roundph_epi8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtt_roundph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2ibs {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x18,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 8)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtt_roundph_epi8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtt_roundph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x19,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtt_roundph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x19,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 8)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half>, <32 x i16>, i32, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtt_roundph_epi8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtt_roundph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x99,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtt_roundph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x99,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2ibs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 8)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttph_epu8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttph_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 4)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttph_epu8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 4)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtt_roundph_epu8(<32 x half> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtt_roundph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2iubs {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7c,0x18,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__A, <32 x i16> zeroinitializer, i32 -1, i32 8)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtt_roundph_epu8(<8 x i64> noundef %__S, i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtt_roundph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x19,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtt_roundph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x19,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <32 x i16>
+ %1 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__B, <32 x i16> %0, i32 %__A, i32 8)
+ %2 = bitcast <32 x i16> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half>, <32 x i16>, i32, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtt_roundph_epu8(i32 noundef %__A, <32 x half> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtt_roundph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x99,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtt_roundph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x99,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <32 x i16> @llvm.x86.avx10.mask.vcvttph2iubs512(<32 x half> %__B, <32 x i16> zeroinitializer, i32 %__A, i32 8)
+ %1 = bitcast <32 x i16> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttps_epi8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2ibs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttps_epi8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttps_epi8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtt_roundps_epi8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtt_roundps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2ibs {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x18,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 8)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtt_roundps_epi8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtt_roundps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x19,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtt_roundps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x19,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 8)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float>, <16 x i32>, i16, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtt_roundps_epi8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtt_roundps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x99,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtt_roundps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x99,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2ibs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 8)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvttps_epu8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvttps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2iubs %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvttps_epu8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvttps_epu8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_ipcvtt_roundps_epu8(<16 x float> noundef %__A) {
+; CHECK-LABEL: test_mm512_ipcvtt_roundps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2iubs {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x18,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__A, <16 x i32> zeroinitializer, i16 -1, i32 8)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
+
+define dso_local <8 x i64> @test_mm512_mask_ipcvtt_roundps_epu8(<8 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_mask_ipcvtt_roundps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x19,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_mask_ipcvtt_roundps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs {sae}, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x19,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <8 x i64> %__S to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__B, <16 x i32> %0, i16 %__A, i32 8)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float>, <16 x i32>, i16, i32)
+
+define dso_local <8 x i64> @test_mm512_maskz_ipcvtt_roundps_epu8(i16 noundef zeroext %__A, <16 x float> noundef %__B) {
+; X64-LABEL: test_mm512_maskz_ipcvtt_roundps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x99,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm512_maskz_ipcvtt_roundps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x99,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i32> @llvm.x86.avx10.mask.vcvttps2iubs512(<16 x float> %__B, <16 x i32> zeroinitializer, i16 %__A, i32 8)
+ %1 = bitcast <16 x i32> %0 to <8 x i64>
+ ret <8 x i64> %1
+}
diff --git a/llvm/test/CodeGen/X86/avx10_2satcvt-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2satcvt-intrinsics.ll
new file mode 100644
index 0000000..e16aa9d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx10_2satcvt-intrinsics.ll
@@ -0,0 +1,1618 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64 --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -verify-machineinstrs -mtriple=i686 --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X86
+
+define dso_local <2 x i64> @test_mm_ipcvtnebf16_epi8(<8 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7f,0x08,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162ibs128(<8 x bfloat> %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtnebf16_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162ibs128(<8 x bfloat> %__B)
+ %2 = bitcast i8 %__A to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %0
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx10.vcvtnebf162ibs128(<8 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtnebf16_epi8(i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162ibs128(<8 x bfloat> %__B)
+ %1 = bitcast i8 %__A to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i16> %0, <8 x i16> zeroinitializer
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtnebf16_epi8(<16 x bfloat> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7f,0x28,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162ibs256(<16 x bfloat> %__A)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtnebf16_epi8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162ibs256(<16 x bfloat> %__B)
+ %2 = bitcast i16 %__A to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %0
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+declare <16 x i16> @llvm.x86.avx10.vcvtnebf162ibs256(<16 x bfloat>)
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtnebf16_epi8(i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162ibs256(<16 x bfloat> %__B)
+ %1 = bitcast i16 %__A to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i16> %0, <16 x i16> zeroinitializer
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define dso_local <2 x i64> @test_mm_ipcvtnebf16_epu8(<8 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7f,0x08,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162iubs128(<8 x bfloat> %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtnebf16_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162iubs128(<8 x bfloat> %__B)
+ %2 = bitcast i8 %__A to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %0
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx10.vcvtnebf162iubs128(<8 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtnebf16_epu8(i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvtnebf162iubs128(<8 x bfloat> %__B)
+ %1 = bitcast i8 %__A to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i16> %0, <8 x i16> zeroinitializer
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtnebf16_epu8(<16 x bfloat> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtnebf162iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7f,0x28,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162iubs256(<16 x bfloat> %__A)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtnebf16_epu8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162iubs256(<16 x bfloat> %__B)
+ %2 = bitcast i16 %__A to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %0
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtnebf16_epu8(i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtnebf162iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtnebf162iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvtnebf162iubs256(<16 x bfloat> %__B)
+ %1 = bitcast i16 %__A to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i16> %0, <16 x i16> zeroinitializer
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+declare <16 x i16> @llvm.x86.avx10.vcvtnebf162iubs256(<16 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_ipcvtph_epi8(<8 x half> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2ibs128(<8 x half> %__A, <8 x i16> zeroinitializer, i8 -1)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtph_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2ibs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <8 x i16> @llvm.x86.avx10.mask.vcvtph2ibs128(<8 x half>, <8 x i16>, i8)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtph_epi8(i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2ibs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtph_epi8(<16 x half> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtph_epi8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+; NOTE(review): Machine-generated FileCheck tests (update_llc_test_checks-style) for
+; @llvm.x86.avx10.mask.vcvtph2ibs256 — zero-masked, rounding (i32 11 = {rz-sae}),
+; masked-rounding, and zero-masked-rounding forms. The encoding bytes in the CHECK
+; lines must match compiler output exactly; regenerate rather than hand-edit.
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtph_epi8(i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtph_epi8_round(<16 x half> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvtph_epi8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2ibs {rz-sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x78,0x78,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 11)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtph_epi8_round(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvtph_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x79,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtph_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x79,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 11)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtph_epi8_round(i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvtph_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2ibs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0xf9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtph_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2ibs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0xf9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 11)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <16 x i16> @llvm.x86.avx10.mask.vcvtph2ibs256(<16 x half>, <16 x i16>, i16, i32)
+
+; NOTE(review): Machine-generated FileCheck tests for the unsigned-destination
+; variants @llvm.x86.avx10.mask.vcvtph2iubs128/256 — plain, masked, and zero-masked
+; forms, plus 256-bit rounding ({rz-sae}, i32 11) forms. CHECK encodings are
+; generated output; regenerate rather than hand-edit.
+define dso_local <2 x i64> @test_mm_ipcvtph_epu8(<8 x half> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2iubs128(<8 x half> %__A, <8 x i16> zeroinitializer, i8 -1)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtph_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2iubs128(<8 x half> %__B, <8 x i16> %0, i8 %__A)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <8 x i16> @llvm.x86.avx10.mask.vcvtph2iubs128(<8 x half>, <8 x i16>, i8)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtph_epu8(i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvtph2iubs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtph_epu8(<16 x half> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtph_epu8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtph_epu8(i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtph_epu8_round(<16 x half> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvtph_epu8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtph2iubs {rz-sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x78,0x78,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 11)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtph_epu8_round(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvtph_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x79,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtph_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x79,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 11)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtph_epu8_round(i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvtph_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtph2iubs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0xf9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtph_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtph2iubs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0xf9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 11)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <16 x i16> @llvm.x86.avx10.mask.vcvtph2iubs256(<16 x half>, <16 x i16>, i16, i32)
+
+; NOTE(review): Machine-generated FileCheck tests for the single-precision source
+; variants @llvm.x86.avx10.mask.vcvtps2ibs128/256 — plain, masked, and zero-masked
+; forms, plus 256-bit rounding ({rz-sae}, i32 11) forms. The 128-bit intrinsic takes
+; no rounding operand. CHECK encodings are generated; regenerate rather than hand-edit.
+define dso_local <2 x i64> @test_mm_ipcvtps_epi8(<4 x float> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2ibs128(<4 x float> %__A, <4 x i32> zeroinitializer, i8 -1)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtps_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2ibs128(<4 x float> %__B, <4 x i32> %0, i8 %__A)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtps_epi8(i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2ibs128(<4 x float> %__B, <4 x i32> zeroinitializer, i8 %__A)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+declare <4 x i32> @llvm.x86.avx10.mask.vcvtps2ibs128(<4 x float>, <4 x i32>, i8)
+
+define dso_local <4 x i64> @test_mm256_ipcvtps_epi8(<8 x float> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtps_epi8(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 4)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtps_epi8(i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtps_epi8_round(<8 x float> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvtps_epi8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2ibs {rz-sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x79,0x78,0x69,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 11)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtps_epi8_round(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvtps_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x79,0x69,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtps_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x79,0x69,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 11)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtps_epi8_round(i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvtps_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2ibs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0xf9,0x69,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtps_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2ibs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0xf9,0x69,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 11)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <8 x i32> @llvm.x86.avx10.mask.vcvtps2ibs256(<8 x float>, <8 x i32>, i8, i32)
+
+; NOTE(review): Machine-generated FileCheck tests for
+; @llvm.x86.avx10.mask.vcvtps2iubs128/256 — plain, masked, and zero-masked forms,
+; plus 256-bit rounding ({rz-sae}, i32 11) forms. CHECK encodings are generated
+; output; regenerate rather than hand-edit.
+define dso_local <2 x i64> @test_mm_ipcvtps_epu8(<4 x float> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvtps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2iubs128(<4 x float> %__A, <4 x i32> zeroinitializer, i8 -1)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvtps_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2iubs128(<4 x float> %__B, <4 x i32> %0, i8 %__A)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvtps_epu8(i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvtps2iubs128(<4 x float> %__B, <4 x i32> zeroinitializer, i8 %__A)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+declare <4 x i32> @llvm.x86.avx10.mask.vcvtps2iubs128(<4 x float>, <4 x i32>, i8)
+
+define dso_local <4 x i64> @test_mm256_ipcvtps_epu8(<8 x float> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvtps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtps_epu8(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 4)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtps_epu8(i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvtps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvtps_epu8_round(<8 x float> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvtps_epu8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvtps2iubs {rz-sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x79,0x78,0x6b,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 11)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvtps_epu8_round(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvtps_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x79,0x6b,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvtps_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs {rz-sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x79,0x6b,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 11)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvtps_epu8_round(i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvtps_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvtps2iubs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0xf9,0x6b,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvtps_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvtps2iubs {rz-sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0xf9,0x6b,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 11)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <8 x i32> @llvm.x86.avx10.mask.vcvtps2iubs256(<8 x float>, <8 x i32>, i8, i32)
+
+; NOTE(review): Machine-generated FileCheck tests for the unmasked bfloat16
+; intrinsics @llvm.x86.avx10.vcvttnebf162ibs128/256. Unlike the ph/ps families
+; above, these intrinsics carry no mask operand — masking is expressed in IR via
+; bitcast-to-<N x i1> plus select, and the backend is expected to fold that select
+; into the instruction's {%k1} / {%k1}{z} forms. Regenerate rather than hand-edit.
+define dso_local <2 x i64> @test_mm_ipcvttnebf16_epi8(<8 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7f,0x08,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162ibs128(<8 x bfloat> %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttnebf16_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162ibs128(<8 x bfloat> %__B)
+ %2 = bitcast i8 %__A to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %0
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx10.vcvttnebf162ibs128(<8 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttnebf16_epi8(i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162ibs128(<8 x bfloat> %__B)
+ %1 = bitcast i8 %__A to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i16> %0, <8 x i16> zeroinitializer
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttnebf16_epi8(<16 x bfloat> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttnebf16_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7f,0x28,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162ibs256(<16 x bfloat> %__A)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttnebf16_epi8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162ibs256(<16 x bfloat> %__B)
+ %2 = bitcast i16 %__A to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %0
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttnebf16_epi8(i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttnebf16_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttnebf16_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162ibs256(<16 x bfloat> %__B)
+ %1 = bitcast i16 %__A to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i16> %0, <16 x i16> zeroinitializer
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+declare <16 x i16> @llvm.x86.avx10.vcvttnebf162ibs256(<16 x bfloat>)
+
+; NOTE(review): Machine-generated FileCheck tests for the unmasked bfloat16
+; intrinsics @llvm.x86.avx10.vcvttnebf162iubs128/256 (same select-based masking
+; pattern as the ibs variants above). Regenerate rather than hand-edit.
+define dso_local <2 x i64> @test_mm_ipcvttnebf16_epu8(<8 x bfloat> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7f,0x08,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162iubs128(<8 x bfloat> %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttnebf16_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x09,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162iubs128(<8 x bfloat> %__B)
+ %2 = bitcast i8 %__A to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %0
+ %4 = bitcast <8 x i16> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+declare <8 x i16> @llvm.x86.avx10.vcvttnebf162iubs128(<8 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttnebf16_epu8(i8 noundef zeroext %__A, <8 x bfloat> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0x89,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.vcvttnebf162iubs128(<8 x bfloat> %__B)
+ %1 = bitcast i8 %__A to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i16> %0, <8 x i16> zeroinitializer
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttnebf16_epu8(<16 x bfloat> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttnebf16_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttnebf162iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7f,0x28,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162iubs256(<16 x bfloat> %__A)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttnebf16_epu8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7f,0x29,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162iubs256(<16 x bfloat> %__B)
+ %2 = bitcast i16 %__A to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %0
+ %4 = bitcast <16 x i16> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttnebf16_epu8(i16 noundef zeroext %__A, <16 x bfloat> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttnebf16_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttnebf162iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttnebf16_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttnebf162iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7f,0xa9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.vcvttnebf162iubs256(<16 x bfloat> %__B)
+ %1 = bitcast i16 %__A to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x i16> %0, <16 x i16> zeroinitializer
+ %3 = bitcast <16 x i16> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+declare <16 x i16> @llvm.x86.avx10.vcvttnebf162iubs256(<16 x bfloat>)
+
+define dso_local <2 x i64> @test_mm_ipcvttph_epi8(<8 x half> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2ibs128(<8 x half> %__A, <8 x i16> zeroinitializer, i8 -1)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttph_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2ibs128(<8 x half> %__B, <8 x i16> %0, i8 %__A)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <8 x i16> @llvm.x86.avx10.mask.vcvttph2ibs128(<8 x half>, <8 x i16>, i8)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttph_epi8(i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2ibs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttph_epi8(<16 x half> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttph_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttph_epi8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttph_epi8(i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttph_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttph_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttph_epi8_round(<16 x half> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvttph_epi8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2ibs {sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x78,0x18,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 8)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttph_epi8_round(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvttph_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x19,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttph_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x19,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 8)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttph_epi8_round(i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvttph_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2ibs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0x99,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttph_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2ibs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0x99,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 8)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+declare <16 x i16> @llvm.x86.avx10.mask.vcvttph2ibs256(<16 x half>, <16 x i16>, i16, i32)
+
+define dso_local <2 x i64> @test_mm_ipcvttph_epu8(<8 x half> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2iubs128(<8 x half> %__A, <8 x i16> zeroinitializer, i8 -1)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttph_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <8 x i16>
+ %1 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2iubs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <8 x i16> @llvm.x86.avx10.mask.vcvttph2iubs128(<8 x half>, <8 x i16>, i8)
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttph_epu8(i8 noundef zeroext %__A, <8 x half> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.avx10.mask.vcvttph2iubs128(<8 x half> %__B, <8 x i16> zeroinitializer, i8 %__A)
+ %1 = bitcast <8 x i16> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttph_epu8(<16 x half> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttph_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttph_epu8(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 4)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttph_epu8(i16 noundef zeroext %__A, <16 x half> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttph_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttph_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 4)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttph_epu8_round(<16 x half> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvttph_epu8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttph2iubs {sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x78,0x18,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__A, <16 x i16> zeroinitializer, i16 -1, i32 8)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttph_epu8_round(<4 x i64> noundef %__S, i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvttph_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x19,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttph_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x78,0x19,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <16 x i16>
+ %1 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__B, <16 x i16> %0, i16 %__A, i32 8)
+ %2 = bitcast <16 x i16> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttph_epu8_round(i16 noundef zeroext %__A, <16 x half> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvttph_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttph2iubs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0x99,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttph_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttph2iubs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x78,0x99,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half> %__B, <16 x i16> zeroinitializer, i16 %__A, i32 8)
+ %1 = bitcast <16 x i16> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <16 x i16> @llvm.x86.avx10.mask.vcvttph2iubs256(<16 x half>, <16 x i16>, i16, i32)
+
+define dso_local <2 x i64> @test_mm_ipcvttps_epi8(<4 x float> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2ibs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2ibs128(<4 x float> %__A, <4 x i32> zeroinitializer, i8 -1)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttps_epi8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2ibs128(<4 x float> %__B, <4 x i32> %0, i8 %__A)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttps_epi8(i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2ibs128(<4 x float> %__B, <4 x i32> zeroinitializer, i8 %__A)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+declare <4 x i32> @llvm.x86.avx10.mask.vcvttps2ibs128(<4 x float>, <4 x i32>, i8)
+
+define dso_local <4 x i64> @test_mm256_ipcvttps_epi8(<8 x float> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttps_epi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2ibs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttps_epi8(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 4)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttps_epi8(i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttps_epi8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttps_epi8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttps_epi8_round(<8 x float> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvttps_epi8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2ibs {sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x79,0x18,0x68,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 8)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttps_epi8_round(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvttps_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x19,0x68,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttps_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x19,0x68,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 8)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttps_epi8_round(i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvttps_epi8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2ibs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0x99,0x68,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttps_epi8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2ibs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0x99,0x68,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 8)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <8 x i32> @llvm.x86.avx10.mask.vcvttps2ibs256(<8 x float>, <8 x i32>, i8, i32)
+
+define dso_local <2 x i64> @test_mm_ipcvttps_epu8(<4 x float> noundef %__A) {
+; CHECK-LABEL: test_mm_ipcvttps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2iubs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2iubs128(<4 x float> %__A, <4 x i32> zeroinitializer, i8 -1)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+define dso_local <2 x i64> @test_mm_mask_ipcvttps_epu8(<2 x i64> noundef %__S, i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_mask_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_mask_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x i64> %__S to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2iubs128(<4 x float> %__B, <4 x i32> %0, i8 %__A)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+define dso_local <2 x i64> @test_mm_maskz_ipcvttps_epu8(i8 noundef zeroext %__A, <4 x float> noundef %__B) {
+; X64-LABEL: test_mm_maskz_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm_maskz_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx10.mask.vcvttps2iubs128(<4 x float> %__B, <4 x i32> zeroinitializer, i8 %__A)
+ %1 = bitcast <4 x i32> %0 to <2 x i64>
+ ret <2 x i64> %1
+}
+
+declare <4 x i32> @llvm.x86.avx10.mask.vcvttps2iubs128(<4 x float>, <4 x i32>, i8)
+
+define dso_local <4 x i64> @test_mm256_ipcvttps_epu8(<8 x float> noundef %__A) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm256_ipcvttps_epu8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2iubs %ymm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttps_epu8(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_mask_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 4)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttps_epu8(i8 noundef zeroext %__A, <8 x float> noundef %__B) local_unnamed_addr #2 {
+; X64-LABEL: test_mm256_maskz_ipcvttps_epu8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttps_epu8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 4)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_ipcvttps_epu8_round(<8 x float> noundef %__A) {
+; CHECK-LABEL: test_mm256_ipcvttps_epu8_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcvttps2iubs {sae}, %ymm0, %ymm0 # encoding: [0x62,0xf5,0x79,0x18,0x6a,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__A, <8 x i32> zeroinitializer, i8 -1, i32 8)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+define dso_local <4 x i64> @test_mm256_mask_ipcvttps_epu8_round(<4 x i64> noundef %__S, i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_mask_ipcvttps_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x19,0x6a,0xc1]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_mask_ipcvttps_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs {sae}, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf5,0x79,0x19,0x6a,0xc1]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = bitcast <4 x i64> %__S to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__B, <8 x i32> %0, i8 %__A, i32 8)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+define dso_local <4 x i64> @test_mm256_maskz_ipcvttps_epu8_round(i8 noundef zeroext %__A, <8 x float> noundef %__B) {
+; X64-LABEL: test_mm256_maskz_ipcvttps_epu8_round:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT: vcvttps2iubs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0x99,0x6a,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; X86-LABEL: test_mm256_maskz_ipcvttps_epu8_round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vcvttps2iubs {sae}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x79,0x99,0x6a,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float> %__B, <8 x i32> zeroinitializer, i8 %__A, i32 8)
+ %1 = bitcast <8 x i32> %0 to <4 x i64>
+ ret <4 x i64> %1
+}
+
+declare <8 x i32> @llvm.x86.avx10.mask.vcvttps2iubs256(<8 x float>, <8 x i32>, i8, i32)
diff --git a/llvm/test/CodeGen/X86/avx512-intel-ocl.ll b/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
index 25d182a..7887027 100644
--- a/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
+++ b/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
@@ -69,8 +69,12 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
; X64-NEXT: andq $-64, %rsp
; X64-NEXT: subq $128, %rsp
; X64-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; X64-NEXT: pushq %rbp
+; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rdi
; X64-NEXT: callq _func_float16_ptr
+; X64-NEXT: addq $8, %rsp
+; X64-NEXT: popq %rbp
; X64-NEXT: vaddps (%rsp), %zmm0, %zmm0
; X64-NEXT: leaq -16(%rbp), %rsp
; X64-NEXT: popq %r12
@@ -149,8 +153,12 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
; X64-NEXT: subq $128, %rsp
; X64-NEXT: vmovaps %zmm1, %zmm16
; X64-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; X64-NEXT: pushq %rbp
+; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rdi
; X64-NEXT: callq _func_float16_ptr
+; X64-NEXT: addq $8, %rsp
+; X64-NEXT: popq %rbp
; X64-NEXT: vaddps %zmm16, %zmm0, %zmm0
; X64-NEXT: vaddps (%rsp), %zmm0, %zmm0
; X64-NEXT: leaq -16(%rbp), %rsp
diff --git a/llvm/test/CodeGen/X86/clobber_base_ptr.ll b/llvm/test/CodeGen/X86/clobber_base_ptr.ll
new file mode 100644
index 0000000..2c39560
--- /dev/null
+++ b/llvm/test/CodeGen/X86/clobber_base_ptr.ll
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i386-pc-windows-gnu"
+
+; This function uses esi as base pointer, the inline asm clobbers esi, so we
+; should save esi using esp before the inline asm, and restore esi after the
+; inline asm.
+
+define i32 @clober_bp() {
+; CHECK-LABEL: clober_bp:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .cfi_offset %ebp, -8
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: .cfi_def_cfa_register %ebp
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: andl $-16, %esp
+; CHECK-NEXT: subl $16, %esp
+; CHECK-NEXT: movl %esp, %esi
+; CHECK-NEXT: .cfi_offset %esi, -16
+; CHECK-NEXT: .cfi_offset %edi, -12
+; CHECK-NEXT: movl $4, 12(%esi)
+; CHECK-NEXT: movl 12(%esi), %eax
+; CHECK-NEXT: addl $3, %eax
+; CHECK-NEXT: andl $-4, %eax
+; CHECK-NEXT: calll __alloca
+; CHECK-NEXT: movl %esp, %eax
+; CHECK-NEXT: andl $-16, %eax
+; CHECK-NEXT: movl %eax, %esp
+; CHECK-NEXT: movl $1, (%eax)
+; CHECK-NEXT: leal 8(%esi), %edi
+; CHECK-NEXT: movl $4, %ecx
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: movl %eax, %esi
+; CHECK-NEXT: #APP
+; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: movl 8(%esi), %eax
+; CHECK-NEXT: leal -8(%ebp), %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
+entry:
+ %size = alloca i32, align 4
+ %g = alloca i32, align 4
+ store volatile i32 4, ptr %size, align 4
+ %len = load volatile i32, ptr %size, align 4
+ %var_array = alloca i8, i32 %len, align 16
+ store i32 1, ptr %var_array, align 16
+ %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %g, ptr %var_array, i32 4)
+ %retval = load i32, ptr %g, align 4
+ ret i32 %retval
+}
+
+; This function has the same code except the inline asm also clobbers
+; frame pointer.
+
+define i32 @clobber_bpfp() {
+; CHECK-LABEL: clobber_bpfp:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .cfi_offset %ebp, -8
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: .cfi_def_cfa_register %ebp
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: andl $-16, %esp
+; CHECK-NEXT: subl $16, %esp
+; CHECK-NEXT: movl %esp, %esi
+; CHECK-NEXT: .cfi_offset %esi, -16
+; CHECK-NEXT: .cfi_offset %edi, -12
+; CHECK-NEXT: movl $4, 12(%esi)
+; CHECK-NEXT: movl 12(%esi), %eax
+; CHECK-NEXT: addl $3, %eax
+; CHECK-NEXT: andl $-4, %eax
+; CHECK-NEXT: calll __alloca
+; CHECK-NEXT: movl %esp, %eax
+; CHECK-NEXT: andl $-16, %eax
+; CHECK-NEXT: movl %eax, %esp
+; CHECK-NEXT: movl $1, (%eax)
+; CHECK-NEXT: leal 8(%esi), %edi
+; CHECK-NEXT: movl $4, %ecx
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x74, 0x04, 0x06, 0x11, 0x08, 0x22 #
+; CHECK-NEXT: movl %eax, %esi
+; CHECK-NEXT: #APP
+; CHECK-NEXT: rep movsb (%esi), %es:(%edi)
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: movl 8(%esi), %eax
+; CHECK-NEXT: leal -8(%ebp), %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
+entry:
+ %size = alloca i32, align 4
+ %g = alloca i32, align 4
+ store volatile i32 4, ptr %size, align 4
+ %len = load volatile i32, ptr %size, align 4
+ %var_array = alloca i8, i32 %len, align 16
+ store i32 1, ptr %var_array, align 16
+ %nil = call { ptr, ptr, i32 } asm "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags},~{ebp}"(ptr %g, ptr %var_array, i32 4)
+ %retval = load i32, ptr %g, align 4
+ ret i32 %retval
+}
+
diff --git a/llvm/test/CodeGen/X86/clobber_frame_ptr.ll b/llvm/test/CodeGen/X86/clobber_frame_ptr.ll
new file mode 100644
index 0000000..6209e1a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/clobber_frame_ptr.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-pc-linux -stackrealign -verify-machineinstrs < %s | FileCheck %s
+
+; The ghccc calling convention uses ebp to pass a parameter, so calling a
+; function using ghccc clobbers ebp. We should save and restore ebp around
+; such a call if ebp is used as the frame pointer.
+
+declare ghccc i32 @external(i32)
+
+; Basic test with ghccc calling convention.
+define i32 @test1(i32 %0, i32 %1) {
+; CHECK-LABEL: test1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: subq $16, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x77, 0x08, 0x06, 0x11, 0x10, 0x22 #
+; CHECK-NEXT: movl %esi, %ebp
+; CHECK-NEXT: movq %rdi, %r13
+; CHECK-NEXT: callq external@PLT
+; CHECK-NEXT: addq $8, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: leaq -40(%rbp), %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+ %x = call ghccc i32 @external(i32 %0, i32 %1)
+ ret i32 %x
+}
+
+; Calling convention hipe has similar behavior. It clobbers rbp but not rbx.
+
+declare cc 11 i64 @hipe1(i64)
+declare cc 11 i64 @hipe2(i64, i64, i64, i64, i64, i64, i64)
+
+; Basic test with hipe calling convention.
+define i64 @test2(i64 %a0, i64 %a1) {
+; CHECK-LABEL: test2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: subq $16, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x77, 0x08, 0x06, 0x11, 0x10, 0x22 #
+; CHECK-NEXT: movq %rsi, %rbp
+; CHECK-NEXT: movq %rdi, %r15
+; CHECK-NEXT: callq hipe1@PLT
+; CHECK-NEXT: addq $8, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: movq %r15, %rax
+; CHECK-NEXT: leaq -40(%rbp), %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+ %x = call cc 11 i64 @hipe1(i64 %a0, i64 %a1)
+ ret i64 %x
+}
+
+; Test with more arguments, so some of them are passed on the stack. The
+; spilling of rbp should not disturb the stack arguments.
+; FIXME: The currently generated code is wrong: rbp is used to load a stack
+; argument after rbp has already been assigned the argument for the function
+; call. This is caused by x86-cf-opt.
+define i64 @test3(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
+; CHECK-LABEL: test3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: subq $16, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x77, 0x08, 0x06, 0x11, 0x10, 0x22 #
+; CHECK-NEXT: movq %rsi, %rbp
+; CHECK-NEXT: movq %rdi, %r15
+; CHECK-NEXT: movq %rdx, %rsi
+; CHECK-NEXT: movq %rcx, %rdx
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: movq %r9, %r8
+; CHECK-NEXT: pushq 24(%rbp)
+; CHECK-NEXT: pushq 16(%rbp)
+; CHECK-NEXT: callq hipe2@PLT
+; CHECK-NEXT: addq $8, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: addq $16, %rsp
+; CHECK-NEXT: movq %r15, %rax
+; CHECK-NEXT: leaq -40(%rbp), %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+ %x = call cc 11 i64 @hipe2(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7)
+ ret i64 %x
+}
diff --git a/llvm/test/CodeGen/X86/clobber_frame_ptr_x32.ll b/llvm/test/CodeGen/X86/clobber_frame_ptr_x32.ll
new file mode 100644
index 0000000..25c951d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/clobber_frame_ptr_x32.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s | FileCheck %s
+
+target triple = "x86_64-linux-gnux32"
+
+define i32 @foo() {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: subq $16, %rsp
+; CHECK-NEXT: movl $4, -8(%rbp)
+; CHECK-NEXT: movl $5, -4(%rbp)
+; CHECK-NEXT: movl -8(%rbp), %eax
+; CHECK-NEXT: movq %rsp, %rcx
+; CHECK-NEXT: addq $15, %rax
+; CHECK-NEXT: andq $-16, %rax
+; CHECK-NEXT: movq %rcx, %rdx
+; CHECK-NEXT: subq %rax, %rdx
+; CHECK-NEXT: movq %rdx, %rsp
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: movl $1, (%rcx,%rax)
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: .cfi_escape 0x0f, 0x06, 0x77, 0x08, 0x06, 0x11, 0x10, 0x22 #
+; CHECK-NEXT: movl $123, %ebp
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: addq $8, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: movl -4(%rbp), %eax
+; CHECK-NEXT: movq %rbp, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+entry:
+ %size = alloca i32, align 4
+ %g = alloca i32, align 4
+ store volatile i32 4, ptr %size, align 4
+ store volatile i32 5, ptr %g, align 4
+ %len = load volatile i32, ptr %size, align 4
+ %var_array = alloca i8, i32 %len, align 16
+ store i32 1, ptr %var_array, align 16
+ call void asm "nop", "{ebp},~{memory}"(i32 123)
+ %retval = load i32, ptr %g, align 4
+ ret i32 %retval
+}
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index 5a63d36..0965b1c 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -178,7 +178,7 @@ define i32 @test7(i64 %res) nounwind {
; NDD-LABEL: test7:
; NDD: # %bb.0: # %entry
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shrq $32, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x20]
+; NDD-NEXT: shrq $32, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x20]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
entry:
@@ -198,9 +198,9 @@ define i32 @test8(i64 %res) nounwind {
;
; NDD-LABEL: test8:
; NDD: # %bb.0:
-; NDD-NEXT: shrq $32, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x20]
+; NDD-NEXT: shrq $32, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x20]
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: cmpl $3, %ecx # encoding: [0x83,0xf9,0x03]
+; NDD-NEXT: cmpl $3, %edi # encoding: [0x83,0xff,0x03]
; NDD-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%lnot = icmp ult i64 %res, 12884901888
@@ -219,7 +219,7 @@ define i32 @test9(i64 %res) nounwind {
; NDD-LABEL: test9:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shrq $33, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x21]
+; NDD-NEXT: shrq $33, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x21]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%lnot = icmp ult i64 %res, 8589934592
@@ -238,7 +238,7 @@ define i32 @test10(i64 %res) nounwind {
; NDD-LABEL: test10:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shrq $32, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x20]
+; NDD-NEXT: shrq $32, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x20]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%lnot = icmp uge i64 %res, 4294967296
@@ -257,9 +257,9 @@ define i32 @test11(i64 %l) nounwind {
;
; NDD-LABEL: test11:
; NDD: # %bb.0:
-; NDD-NEXT: shrq $47, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x2f]
+; NDD-NEXT: shrq $47, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x2f]
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: cmpl $1, %ecx # encoding: [0x83,0xf9,0x01]
+; NDD-NEXT: cmpl $1, %edi # encoding: [0x83,0xff,0x01]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%shr.mask = and i64 %l, -140737488355328
@@ -331,7 +331,7 @@ define i32 @test14(i32 %mask, i32 %base, i32 %intra) {
;
; NDD-LABEL: test14:
; NDD: # %bb.0:
-; NDD-NEXT: shrl $7, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xc1,0xef,0x07]
+; NDD-NEXT: shrl $7, %edi # EVEX TO LEGACY Compression encoding: [0xc1,0xef,0x07]
; NDD-NEXT: cmovnsl %edx, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x49,0xf2]
; NDD-NEXT: retq # encoding: [0xc3]
%s = lshr i32 %mask, 7
@@ -353,10 +353,10 @@ define zeroext i1 @test15(i32 %bf.load, i32 %n) {
;
; NDD-LABEL: test15:
; NDD: # %bb.0:
-; NDD-NEXT: shrl $16, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0xc1,0xef,0x10]
-; NDD-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
-; NDD-NEXT: cmpl %esi, %eax # encoding: [0x39,0xf0]
-; NDD-NEXT: setae %al # encoding: [0x0f,0x93,0xc0]
+; NDD-NEXT: shrl $16, %edi # EVEX TO LEGACY Compression encoding: [0xc1,0xef,0x10]
+; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
+; NDD-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; NDD-NEXT: setae %cl # encoding: [0x0f,0x93,0xc1]
; NDD-NEXT: orb %cl, %al # EVEX TO LEGACY Compression encoding: [0x08,0xc8]
; NDD-NEXT: retq # encoding: [0xc3]
%bf.lshr = lshr i32 %bf.load, 16
@@ -482,7 +482,7 @@ define i32 @highmask_i64_mask64(i64 %val) {
; NDD-LABEL: highmask_i64_mask64:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shrq $41, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x29]
+; NDD-NEXT: shrq $41, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x29]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i64 %val, -2199023255552
@@ -526,7 +526,7 @@ define i32 @highmask_i64_mask32(i64 %val) {
; NDD-LABEL: highmask_i64_mask32:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shrq $20, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xef,0x14]
+; NDD-NEXT: shrq $20, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x14]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i64 %val, -1048576
@@ -584,7 +584,7 @@ define i32 @lowmask_i64_mask64(i64 %val) {
; NDD-LABEL: lowmask_i64_mask64:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shlq $16, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xe7,0x10]
+; NDD-NEXT: shlq $16, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x10]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i64 %val, 281474976710655
@@ -628,7 +628,7 @@ define i32 @lowmask_i64_mask32(i64 %val) {
; NDD-LABEL: lowmask_i64_mask32:
; NDD: # %bb.0:
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
-; NDD-NEXT: shlq $44, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0xc1,0xe7,0x2c]
+; NDD-NEXT: shlq $44, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x2c]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%and = and i64 %val, 1048575
@@ -739,8 +739,8 @@ define i1 @shifted_mask64_testb(i64 %a) {
;
; NDD-LABEL: shifted_mask64_testb:
; NDD: # %bb.0:
-; NDD-NEXT: shrq $50, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0xc1,0xef,0x32]
-; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: shrq $50, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x32]
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 287104476244869120 ; 0xff << 50
@@ -758,8 +758,8 @@ define i1 @shifted_mask64_testw(i64 %a) {
;
; NDD-LABEL: shifted_mask64_testw:
; NDD: # %bb.0:
-; NDD-NEXT: shrq $33, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0xc1,0xef,0x21]
-; NDD-NEXT: testw %ax, %ax # encoding: [0x66,0x85,0xc0]
+; NDD-NEXT: shrq $33, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x21]
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 562941363486720 ; 0xffff << 33
@@ -777,8 +777,8 @@ define i1 @shifted_mask64_testl(i64 %a) {
;
; NDD-LABEL: shifted_mask64_testl:
; NDD: # %bb.0:
-; NDD-NEXT: shrq $7, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0xc1,0xef,0x07]
-; NDD-NEXT: testl %eax, %eax # encoding: [0x85,0xc0]
+; NDD-NEXT: shrq $7, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xef,0x07]
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 549755813760 ; 0xffffffff << 7
@@ -817,9 +817,9 @@ define i1 @shifted_mask64_extra_use_and(i64 %a) {
; NDD: # %bb.0:
; NDD-NEXT: movabsq $287104476244869120, %rax # encoding: [0x48,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0x03]
; NDD-NEXT: # imm = 0x3FC000000000000
-; NDD-NEXT: andq %rax, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x21,0xc7]
+; NDD-NEXT: andq %rax, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x21,0xc7]
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
-; NDD-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NDD-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NDD-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NDD-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 287104476244869120 ; 0xff << 50
@@ -868,10 +868,10 @@ define i1 @shifted_mask32_extra_use_and(i64 %a) {
;
; NDD-LABEL: shifted_mask32_extra_use_and:
; NDD: # %bb.0:
-; NDD-NEXT: andq $66846720, %rdi, %rcx # encoding: [0x62,0xf4,0xf4,0x18,0x81,0xe7,0x00,0x00,0xfc,0x03]
+; NDD-NEXT: andq $66846720, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0x81,0xe7,0x00,0x00,0xfc,0x03]
; NDD-NEXT: # imm = 0x3FC0000
; NDD-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
-; NDD-NEXT: movq %rcx, d64(%rip) # encoding: [0x48,0x89,0x0d,A,A,A,A]
+; NDD-NEXT: movq %rdi, d64(%rip) # encoding: [0x48,0x89,0x3d,A,A,A,A]
; NDD-NEXT: # fixup A - offset: 3, value: d64-4, kind: reloc_riprel_4byte
; NDD-NEXT: retq # encoding: [0xc3]
%v0 = and i64 %a, 66846720 ; 0xff << 50
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 4ed00a9..8bfaa61 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -83,7 +83,7 @@ define <4 x i32> @combine_vec_srem_by_minsigned(<4 x i32> %x) {
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_srem_by_minsigned:
@@ -93,7 +93,7 @@ define <4 x i32> @combine_vec_srem_by_minsigned(<4 x i32> %x) {
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = srem <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
ret <4 x i32> %1
@@ -225,24 +225,28 @@ define <4 x i32> @combine_vec_srem_by_pow2a_neg(<4 x i32> %x) {
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $30, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: psrld $2, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: psubd %xmm1, %xmm2
-; SSE-NEXT: pslld $2, %xmm2
-; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: combine_vec_srem_by_pow2a_neg:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX-NEXT: vpsrld $30, %xmm1, %xmm1
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpsrld $2, %xmm1, %xmm1
-; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpslld $2, %xmm1, %xmm1
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: combine_vec_srem_by_pow2a_neg:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $30, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vec_srem_by_pow2a_neg:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT: vpsrld $30, %xmm1, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [4294967292,4294967292,4294967292,4294967292]
+; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
%1 = srem <4 x i32> %x, <i32 -4, i32 -4, i32 -4, i32 -4>
ret <4 x i32> %1
}
diff --git a/llvm/test/CodeGen/X86/i386-baseptr.ll b/llvm/test/CodeGen/X86/i386-baseptr.ll
index 08e4bde..777eb83 100644
--- a/llvm/test/CodeGen/X86/i386-baseptr.ll
+++ b/llvm/test/CodeGen/X86/i386-baseptr.ll
@@ -109,10 +109,14 @@ define x86_regcallcc void @clobber_baseptr_argptr(i32 %param1, i32 %param2, i32
; CHECK-NEXT: subl %eax, %edx
; CHECK-NEXT: movl %edx, %esp
; CHECK-NEXT: negl %eax
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: movl $405, %esi # imm = 0x195
; CHECK-NEXT: #APP
; CHECK-NEXT: nop
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: addl $28, %esp
+; CHECK-NEXT: popl %esi
; CHECK-NEXT: movl $405, %ebx # imm = 0x195
; CHECK-NEXT: #APP
; CHECK-NEXT: nop
diff --git a/llvm/test/CodeGen/X86/inline-asm-function-call-pic.ll b/llvm/test/CodeGen/X86/inline-asm-function-call-pic.ll
index 3c98eea..d3ca872 100644
--- a/llvm/test/CodeGen/X86/inline-asm-function-call-pic.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-function-call-pic.ll
@@ -37,6 +37,8 @@ define void @func() local_unnamed_addr #0 {
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %ebx
; CHECK-NEXT: calll static_func
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: #APP
; CHECK-EMPTY:
; CHECK-NEXT: calll static_func
@@ -52,6 +54,8 @@ define void @func() local_unnamed_addr #0 {
; CHECK-NEXT: shrl $0, %esp
; CHECK-EMPTY:
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: addl $12, %esp
+; CHECK-NEXT: popl %ebp
entry:
%call = tail call i32 @static_func()
;; We test call, CALL, and jmp.
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 275a42e..6fcc1ed 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -59,7 +59,6 @@
; CHECK-NEXT: Constant Hoisting
; CHECK-NEXT: Replace intrinsics with calls to vector library
; CHECK-NEXT: Partially inline calls to library functions
-; CHECK-NEXT: Expand vector predication intrinsics
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
diff --git a/llvm/test/CodeGen/X86/popcnt.ll b/llvm/test/CodeGen/X86/popcnt.ll
index 13fa639..35c7c0e 100644
--- a/llvm/test/CodeGen/X86/popcnt.ll
+++ b/llvm/test/CodeGen/X86/popcnt.ll
@@ -182,11 +182,11 @@ define i32 @cnt32(i32 %x) nounwind readnone {
; X64-NDD: # %bb.0:
; X64-NDD-NEXT: shrl %edi, %eax
; X64-NDD-NEXT: andl $1431655765, %eax # imm = 0x55555555
-; X64-NDD-NEXT: subl %eax, %edi, %eax
-; X64-NDD-NEXT: andl $858993459, %eax, %ecx # imm = 0x33333333
-; X64-NDD-NEXT: shrl $2, %eax
-; X64-NDD-NEXT: andl $858993459, %eax # imm = 0x33333333
-; X64-NDD-NEXT: addl %ecx, %eax
+; X64-NDD-NEXT: subl %eax, %edi
+; X64-NDD-NEXT: andl $858993459, %edi, %eax # imm = 0x33333333
+; X64-NDD-NEXT: shrl $2, %edi
+; X64-NDD-NEXT: andl $858993459, %edi # imm = 0x33333333
+; X64-NDD-NEXT: addl %edi, %eax
; X64-NDD-NEXT: shrl $4, %eax, %ecx
; X64-NDD-NEXT: addl %ecx, %eax
; X64-NDD-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -277,12 +277,12 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X64-NDD-NEXT: shrq %rdi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rdi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rcx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rcx, %rax, %rdx
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: addq %rdx, %rax
+; X64-NDD-NEXT: subq %rax, %rdi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
@@ -491,32 +491,32 @@ define i128 @cnt128(i128 %x) nounwind readnone {
; X64-NDD-NEXT: shrq %rsi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rsi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rdx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rdx, %rax, %rsi
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rdx, %rax
-; X64-NDD-NEXT: addq %rsi, %rax
-; X64-NDD-NEXT: shrq $4, %rax, %rsi
-; X64-NDD-NEXT: addq %rsi, %rax
+; X64-NDD-NEXT: subq %rax, %rsi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rsi, %rdx
+; X64-NDD-NEXT: shrq $2, %rsi
+; X64-NDD-NEXT: andq %rax, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
+; X64-NDD-NEXT: shrq $4, %rdx, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
; X64-NDD-NEXT: movabsq $1085102592571150095, %rsi # imm = 0xF0F0F0F0F0F0F0F
-; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: andq %rsi, %rdx
; X64-NDD-NEXT: movabsq $72340172838076673, %r8 # imm = 0x101010101010101
-; X64-NDD-NEXT: imulq %r8, %rax
-; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: imulq %r8, %rdx
+; X64-NDD-NEXT: shrq $56, %rdx
; X64-NDD-NEXT: shrq %rdi, %r9
; X64-NDD-NEXT: andq %r9, %rcx
-; X64-NDD-NEXT: subq %rcx, %rdi, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx, %rdi
-; X64-NDD-NEXT: shrq $2, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx
-; X64-NDD-NEXT: addq %rdi, %rcx
-; X64-NDD-NEXT: shrq $4, %rcx, %rdx
-; X64-NDD-NEXT: addq %rdx, %rcx
-; X64-NDD-NEXT: andq %rsi, %rcx
-; X64-NDD-NEXT: imulq %r8, %rcx
-; X64-NDD-NEXT: shrq $56, %rcx
+; X64-NDD-NEXT: subq %rcx, %rdi
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: imulq %r8, %rax
+; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: addq %rdx, %rax
; X64-NDD-NEXT: xorl %edx, %edx
; X64-NDD-NEXT: retq
;
@@ -685,12 +685,12 @@ define i64 @cnt64_noimplicitfloat(i64 %x) nounwind readnone noimplicitfloat {
; X64-NDD-NEXT: shrq %rdi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rdi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rcx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rcx, %rax, %rdx
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: addq %rdx, %rax
+; X64-NDD-NEXT: subq %rax, %rdi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
@@ -759,12 +759,12 @@ define i32 @cnt32_optsize(i32 %x) nounwind readnone optsize {
; X64-NDD: # %bb.0:
; X64-NDD-NEXT: shrl %edi, %eax
; X64-NDD-NEXT: andl $1431655765, %eax # imm = 0x55555555
-; X64-NDD-NEXT: subl %eax, %edi, %eax
-; X64-NDD-NEXT: movl $858993459, %ecx # imm = 0x33333333
-; X64-NDD-NEXT: andl %ecx, %eax, %edx
-; X64-NDD-NEXT: shrl $2, %eax
-; X64-NDD-NEXT: andl %ecx, %eax
-; X64-NDD-NEXT: addl %edx, %eax
+; X64-NDD-NEXT: subl %eax, %edi
+; X64-NDD-NEXT: movl $858993459, %eax # imm = 0x33333333
+; X64-NDD-NEXT: andl %eax, %edi, %ecx
+; X64-NDD-NEXT: shrl $2, %edi
+; X64-NDD-NEXT: andl %edi, %eax
+; X64-NDD-NEXT: addl %ecx, %eax
; X64-NDD-NEXT: shrl $4, %eax, %ecx
; X64-NDD-NEXT: addl %ecx, %eax
; X64-NDD-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -864,12 +864,12 @@ define i64 @cnt64_optsize(i64 %x) nounwind readnone optsize {
; X64-NDD-NEXT: shrq %rdi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rdi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rcx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rcx, %rax, %rdx
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: addq %rdx, %rax
+; X64-NDD-NEXT: subq %rax, %rdi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
@@ -1087,32 +1087,32 @@ define i128 @cnt128_optsize(i128 %x) nounwind readnone optsize {
; X64-NDD-NEXT: shrq %rsi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rsi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rdx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rdx, %rax, %rsi
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rdx, %rax
-; X64-NDD-NEXT: addq %rsi, %rax
-; X64-NDD-NEXT: shrq $4, %rax, %rsi
-; X64-NDD-NEXT: addq %rsi, %rax
+; X64-NDD-NEXT: subq %rax, %rsi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rsi, %rdx
+; X64-NDD-NEXT: shrq $2, %rsi
+; X64-NDD-NEXT: andq %rax, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
+; X64-NDD-NEXT: shrq $4, %rdx, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
; X64-NDD-NEXT: movabsq $1085102592571150095, %rsi # imm = 0xF0F0F0F0F0F0F0F
-; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: andq %rsi, %rdx
; X64-NDD-NEXT: movabsq $72340172838076673, %r8 # imm = 0x101010101010101
-; X64-NDD-NEXT: imulq %r8, %rax
-; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: imulq %r8, %rdx
+; X64-NDD-NEXT: shrq $56, %rdx
; X64-NDD-NEXT: shrq %rdi, %r9
; X64-NDD-NEXT: andq %r9, %rcx
-; X64-NDD-NEXT: subq %rcx, %rdi, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx, %rdi
-; X64-NDD-NEXT: shrq $2, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx
-; X64-NDD-NEXT: addq %rdi, %rcx
-; X64-NDD-NEXT: shrq $4, %rcx, %rdx
-; X64-NDD-NEXT: addq %rdx, %rcx
-; X64-NDD-NEXT: andq %rsi, %rcx
-; X64-NDD-NEXT: imulq %r8, %rcx
-; X64-NDD-NEXT: shrq $56, %rcx
+; X64-NDD-NEXT: subq %rcx, %rdi
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: imulq %r8, %rax
+; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: addq %rdx, %rax
; X64-NDD-NEXT: xorl %edx, %edx
; X64-NDD-NEXT: retq
;
@@ -1257,11 +1257,11 @@ define i32 @cnt32_pgso(i32 %x) nounwind readnone !prof !14 {
; X64-NDD: # %bb.0:
; X64-NDD-NEXT: shrl %edi, %eax
; X64-NDD-NEXT: andl $1431655765, %eax # imm = 0x55555555
-; X64-NDD-NEXT: subl %eax, %edi, %eax
-; X64-NDD-NEXT: andl $858993459, %eax, %ecx # imm = 0x33333333
-; X64-NDD-NEXT: shrl $2, %eax
-; X64-NDD-NEXT: andl $858993459, %eax # imm = 0x33333333
-; X64-NDD-NEXT: addl %ecx, %eax
+; X64-NDD-NEXT: subl %eax, %edi
+; X64-NDD-NEXT: andl $858993459, %edi, %eax # imm = 0x33333333
+; X64-NDD-NEXT: shrl $2, %edi
+; X64-NDD-NEXT: andl $858993459, %edi # imm = 0x33333333
+; X64-NDD-NEXT: addl %edi, %eax
; X64-NDD-NEXT: shrl $4, %eax, %ecx
; X64-NDD-NEXT: addl %ecx, %eax
; X64-NDD-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -1352,12 +1352,12 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X64-NDD-NEXT: shrq %rdi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rdi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rcx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rcx, %rax, %rdx
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: addq %rdx, %rax
+; X64-NDD-NEXT: subq %rax, %rdi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
; X64-NDD-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
@@ -1568,32 +1568,32 @@ define i128 @cnt128_pgso(i128 %x) nounwind readnone !prof !14 {
; X64-NDD-NEXT: shrq %rsi, %rax
; X64-NDD-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NDD-NEXT: andq %rcx, %rax
-; X64-NDD-NEXT: subq %rax, %rsi, %rax
-; X64-NDD-NEXT: movabsq $3689348814741910323, %rdx # imm = 0x3333333333333333
-; X64-NDD-NEXT: andq %rdx, %rax, %rsi
-; X64-NDD-NEXT: shrq $2, %rax
-; X64-NDD-NEXT: andq %rdx, %rax
-; X64-NDD-NEXT: addq %rsi, %rax
-; X64-NDD-NEXT: shrq $4, %rax, %rsi
-; X64-NDD-NEXT: addq %rsi, %rax
+; X64-NDD-NEXT: subq %rax, %rsi
+; X64-NDD-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NDD-NEXT: andq %rax, %rsi, %rdx
+; X64-NDD-NEXT: shrq $2, %rsi
+; X64-NDD-NEXT: andq %rax, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
+; X64-NDD-NEXT: shrq $4, %rdx, %rsi
+; X64-NDD-NEXT: addq %rsi, %rdx
; X64-NDD-NEXT: movabsq $1085102592571150095, %rsi # imm = 0xF0F0F0F0F0F0F0F
-; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: andq %rsi, %rdx
; X64-NDD-NEXT: movabsq $72340172838076673, %r8 # imm = 0x101010101010101
-; X64-NDD-NEXT: imulq %r8, %rax
-; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: imulq %r8, %rdx
+; X64-NDD-NEXT: shrq $56, %rdx
; X64-NDD-NEXT: shrq %rdi, %r9
; X64-NDD-NEXT: andq %r9, %rcx
-; X64-NDD-NEXT: subq %rcx, %rdi, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx, %rdi
-; X64-NDD-NEXT: shrq $2, %rcx
-; X64-NDD-NEXT: andq %rdx, %rcx
-; X64-NDD-NEXT: addq %rdi, %rcx
-; X64-NDD-NEXT: shrq $4, %rcx, %rdx
-; X64-NDD-NEXT: addq %rdx, %rcx
-; X64-NDD-NEXT: andq %rsi, %rcx
-; X64-NDD-NEXT: imulq %r8, %rcx
-; X64-NDD-NEXT: shrq $56, %rcx
+; X64-NDD-NEXT: subq %rcx, %rdi
+; X64-NDD-NEXT: andq %rax, %rdi, %rcx
+; X64-NDD-NEXT: shrq $2, %rdi
+; X64-NDD-NEXT: andq %rdi, %rax
+; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: shrq $4, %rax, %rcx
; X64-NDD-NEXT: addq %rcx, %rax
+; X64-NDD-NEXT: andq %rsi, %rax
+; X64-NDD-NEXT: imulq %r8, %rax
+; X64-NDD-NEXT: shrq $56, %rax
+; X64-NDD-NEXT: addq %rdx, %rax
; X64-NDD-NEXT: xorl %edx, %edx
; X64-NDD-NEXT: retq
;
@@ -1739,11 +1739,11 @@ define i32 @popcount_zext_i32(i16 zeroext %x) {
; X64-NDD: # %bb.0:
; X64-NDD-NEXT: shrl %edi, %eax
; X64-NDD-NEXT: andl $21845, %eax # imm = 0x5555
-; X64-NDD-NEXT: subl %eax, %edi, %eax
-; X64-NDD-NEXT: andl $858993459, %eax, %ecx # imm = 0x33333333
-; X64-NDD-NEXT: shrl $2, %eax
-; X64-NDD-NEXT: andl $858993459, %eax # imm = 0x33333333
-; X64-NDD-NEXT: addl %ecx, %eax
+; X64-NDD-NEXT: subl %eax, %edi
+; X64-NDD-NEXT: andl $858993459, %edi, %eax # imm = 0x33333333
+; X64-NDD-NEXT: shrl $2, %edi
+; X64-NDD-NEXT: andl $858993459, %edi # imm = 0x33333333
+; X64-NDD-NEXT: addl %edi, %eax
; X64-NDD-NEXT: shrl $4, %eax, %ecx
; X64-NDD-NEXT: addl %ecx, %eax
; X64-NDD-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
diff --git a/llvm/test/CodeGen/X86/select_const_i128.ll b/llvm/test/CodeGen/X86/select_const_i128.ll
index d7859ba..f0f0c58 100644
--- a/llvm/test/CodeGen/X86/select_const_i128.ll
+++ b/llvm/test/CodeGen/X86/select_const_i128.ll
@@ -23,8 +23,8 @@ define i128 @select_eq_i128(ptr %a) {
; NDD-NEXT: ptest %xmm0, %xmm0
; NDD-NEXT: setne %al
; NDD-NEXT: addq $-1, %rax
-; NDD-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
-; NDD-NEXT: adcq $0, %rcx, %rdx
+; NDD-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
+; NDD-NEXT: adcq $0, %rdx
; NDD-NEXT: retq
%1 = load i128, ptr %a, align 16
%cmp = icmp eq i128 %1, 1
diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
index d2a1e5e..3359202 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
@@ -624,7 +624,7 @@ define <4 x i32> @test_srem_int_min(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -637,7 +637,7 @@ define <4 x i32> @test_srem_int_min(<4 x i32> %X) nounwind {
; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -649,7 +649,7 @@ define <4 x i32> @test_srem_int_min(<4 x i32> %X) nounwind {
; CHECK-AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/x86-32-intrcc.ll b/llvm/test/CodeGen/X86/x86-32-intrcc.ll
index 3c3944c..a0f937e 100644
--- a/llvm/test/CodeGen/X86/x86-32-intrcc.ll
+++ b/llvm/test/CodeGen/X86/x86-32-intrcc.ll
@@ -108,8 +108,10 @@ define x86_intrcc void @test_isr_clobbers(ptr byval(%struct.interrupt_frame) %fr
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: cld
+; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: popl %ebp
; CHECK-NEXT: leal -12(%ebp), %esp
; CHECK-NEXT: popl %eax
; CHECK-NEXT: popl %ebx
@@ -127,8 +129,10 @@ define x86_intrcc void @test_isr_clobbers(ptr byval(%struct.interrupt_frame) %fr
; CHECK0-NEXT: pushl %eax
; CHECK0-NEXT: andl $-16, %esp
; CHECK0-NEXT: cld
+; CHECK0-NEXT: pushl %ebp
; CHECK0-NEXT: #APP
; CHECK0-NEXT: #NO_APP
+; CHECK0-NEXT: popl %ebp
; CHECK0-NEXT: leal -12(%ebp), %esp
; CHECK0-NEXT: popl %eax
; CHECK0-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/x86-64-baseptr.ll b/llvm/test/CodeGen/X86/x86-64-baseptr.ll
index 8cda4ba..020004d 100644
--- a/llvm/test/CodeGen/X86/x86-64-baseptr.ll
+++ b/llvm/test/CodeGen/X86/x86-64-baseptr.ll
@@ -136,10 +136,14 @@ define void @clobber_base() #0 {
; X32ABI-NEXT: subl %eax, %edx
; X32ABI-NEXT: negl %eax
; X32ABI-NEXT: movl %edx, %esp
+; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: subl $24, %esp
; X32ABI-NEXT: movl $405, %ebx # imm = 0x195
; X32ABI-NEXT: #APP
; X32ABI-NEXT: nop
; X32ABI-NEXT: #NO_APP
+; X32ABI-NEXT: addl $24, %esp
+; X32ABI-NEXT: popq %rbx
; X32ABI-NEXT: movl $8, %edx
; X32ABI-NEXT: #APP
; X32ABI-NEXT: movl %edx, (%ebx)
@@ -268,6 +272,8 @@ define x86_regcallcc void @clobber_baseptr_argptr(i32 %param1, i32 %param2, i32
; X32ABI-NEXT: subl %eax, %edx
; X32ABI-NEXT: negl %eax
; X32ABI-NEXT: movl %edx, %esp
+; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: subl $24, %esp
; X32ABI-NEXT: movl $405, %ebx # imm = 0x195
; X32ABI-NEXT: #APP
; X32ABI-NEXT: nop
@@ -275,6 +281,8 @@ define x86_regcallcc void @clobber_baseptr_argptr(i32 %param1, i32 %param2, i32
; X32ABI-NEXT: #APP
; X32ABI-NEXT: nop
; X32ABI-NEXT: #NO_APP
+; X32ABI-NEXT: addl $24, %esp
+; X32ABI-NEXT: popq %rbx
; X32ABI-NEXT: movl $8, %edx
; X32ABI-NEXT: #APP
; X32ABI-NEXT: movl %edx, (%ebx)
@@ -385,10 +393,14 @@ define void @vmw_host_printf(ptr %fmt, ...) nounwind {
; X32ABI-NEXT: movl $48, (%eax)
; X32ABI-NEXT: movl $8, (%eax)
; X32ABI-NEXT: xorl %eax, %eax
+; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: subl $24, %esp
; X32ABI-NEXT: xorl %ebx, %ebx
; X32ABI-NEXT: xorl %ecx, %ecx
; X32ABI-NEXT: #APP
; X32ABI-NEXT: #NO_APP
+; X32ABI-NEXT: addl $24, %esp
+; X32ABI-NEXT: popq %rbx
; X32ABI-NEXT: leal -8(%ebp), %esp
; X32ABI-NEXT: popq %rbx
; X32ABI-NEXT: popq %rbp
diff --git a/llvm/test/CodeGen/X86/x86-64-flags-intrinsics.ll b/llvm/test/CodeGen/X86/x86-64-flags-intrinsics.ll
index 47aefdb..b4c18dd 100644
--- a/llvm/test/CodeGen/X86/x86-64-flags-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/x86-64-flags-intrinsics.ll
@@ -94,6 +94,8 @@ define i64 @read_flags_reg_pressure() nounwind {
; WIN64-NEXT: pushq %rbx
; WIN64-NEXT: subq $16, %rsp
; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; WIN64-NEXT: pushq %rbp
+; WIN64-NEXT: pushq %rax
; WIN64-NEXT: #APP
; WIN64-NEXT: #NO_APP
; WIN64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
@@ -103,6 +105,8 @@ define i64 @read_flags_reg_pressure() nounwind {
; WIN64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; WIN64-NEXT: #APP
; WIN64-NEXT: #NO_APP
+; WIN64-NEXT: addq $8, %rsp
+; WIN64-NEXT: popq %rbp
; WIN64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; WIN64-NEXT: addq $16, %rsp
; WIN64-NEXT: popq %rbx
@@ -177,6 +181,8 @@ define void @write_flags_reg_pressure(i64 noundef %0) nounwind {
; WIN64-NEXT: subq $16, %rsp
; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
; WIN64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; WIN64-NEXT: pushq %rbp
+; WIN64-NEXT: pushq %rax
; WIN64-NEXT: #APP
; WIN64-NEXT: #NO_APP
; WIN64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
@@ -186,6 +192,8 @@ define void @write_flags_reg_pressure(i64 noundef %0) nounwind {
; WIN64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; WIN64-NEXT: #APP
; WIN64-NEXT: #NO_APP
+; WIN64-NEXT: popq %rax
+; WIN64-NEXT: popq %rbp
; WIN64-NEXT: addq $16, %rsp
; WIN64-NEXT: popq %rbx
; WIN64-NEXT: popq %rdi
diff --git a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/dse-after-memcpyopt-merge.ll b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/dse-after-memcpyopt-merge.ll
index 5e9c7b3..2c26cb8 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/dse-after-memcpyopt-merge.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/dse-after-memcpyopt-merge.ll
@@ -17,8 +17,8 @@
; CHECK: #dbg_assign({{.*}}, ptr %g, !DIExpression(),
; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 64, 32), ![[ID:[0-9]+]], ptr %arrayidx.i, !DIExpression(),
; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 32, 32), ![[ID]], ptr %arrayidx3.i, !DIExpression(),
-; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 0, 32), ![[UniqueID1:[0-9]+]], ptr undef, !DIExpression(),
-; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 96, 32), ![[UniqueID2:[0-9]+]], ptr undef, !DIExpression(),
+; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 0, 32), ![[UniqueID1:[0-9]+]], ptr poison, !DIExpression(),
+; CHECK: #dbg_assign(float 0.000000e+00, ![[#]], !DIExpression(DW_OP_LLVM_fragment, 96, 32), ![[UniqueID2:[0-9]+]], ptr poison, !DIExpression(),
; CHECK: call void @llvm.memset{{.*}}, !DIAssignID ![[ID]]
; CHECK-DAG: ![[ID]] = distinct !DIAssignID()
@@ -32,7 +32,7 @@ $_ZN1vC2Ef = comdat any
define dso_local void @_Z1fv() local_unnamed_addr !dbg !7 {
entry:
%g = alloca %struct.v, align 4, !DIAssignID !23
- call void @llvm.dbg.assign(metadata i1 undef, metadata !11, metadata !DIExpression(), metadata !23, metadata ptr %g, metadata !DIExpression()), !dbg !24
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !11, metadata !DIExpression(), metadata !23, metadata ptr %g, metadata !DIExpression()), !dbg !24
%arrayidx.i = getelementptr inbounds %struct.v, ptr %g, i64 0, i32 0, i64 2, !dbg !37
call void @llvm.dbg.assign(metadata float 0.000000e+00, metadata !11, metadata !DIExpression(DW_OP_LLVM_fragment, 64, 32), metadata !39, metadata ptr %arrayidx.i, metadata !DIExpression()), !dbg !24
%arrayidx3.i = getelementptr inbounds %struct.v, ptr %g, i64 0, i32 0, i64 1, !dbg !40
diff --git a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten-offset.ll b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten-offset.ll
index ad83b6f..47dabf6 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten-offset.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten-offset.ll
@@ -32,7 +32,7 @@
; CHECK: #dbg_assign({{.*}}, ptr %local, !DIExpression(),
; CHECK: call void @llvm.memset{{.*}}, !DIAssignID ![[ID:[0-9]+]]
; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR:[0-9]+]], !DIExpression(DW_OP_LLVM_fragment, 64, 96), ![[ID:[0-9]+]], ptr %offset_4_bytes, !DIExpression(DW_OP_plus_uconst, 4),
-; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR]], !DIExpression(DW_OP_LLVM_fragment, 128, 32), ![[UniqueID1:[0-9]+]], ptr undef, !DIExpression({{.*}}),
+; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR]], !DIExpression(DW_OP_LLVM_fragment, 128, 32), ![[UniqueID1:[0-9]+]], ptr poison, !DIExpression({{.*}}),
;; DSE will shorten the first store in shortenStart from [0, 160) bits to [128,
;; 160) bits. Variable 'local2' has been adjusted to be 160 bits. Check we get
@@ -44,7 +44,7 @@
; CHECK: #dbg_assign({{.*}}, ptr %local2, !DIExpression(),
; CHECK: call void @llvm.memset{{.*}}, !DIAssignID ![[ID2:[0-9]+]]
; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2:[0-9]+]], !DIExpression(), ![[ID2]], ptr %local2, !DIExpression(),
-; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2]], !DIExpression(DW_OP_LLVM_fragment, 0, 128), ![[UniqueID2:[0-9]+]], ptr undef, !DIExpression(),
+; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2]], !DIExpression(DW_OP_LLVM_fragment, 0, 128), ![[UniqueID2:[0-9]+]], ptr poison, !DIExpression(),
; CHECK-DAG: ![[ID]] = distinct !DIAssignID()
; CHECK-DAG: ![[UniqueID1]] = distinct !DIAssignID()
@@ -53,7 +53,7 @@
define dso_local void @_Z10shortenEndv() local_unnamed_addr #0 !dbg !7 {
entry:
%local = alloca [80 x i8], align 16, !DIAssignID !16
- call void @llvm.dbg.assign(metadata i1 undef, metadata !11, metadata !DIExpression(), metadata !16, metadata ptr %local, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !11, metadata !DIExpression(), metadata !16, metadata ptr %local, metadata !DIExpression()), !dbg !17
%arraydecay = getelementptr inbounds [80 x i8], ptr %local, i64 0, i64 0, !dbg !19
%offset_4_bytes = getelementptr inbounds [80 x i8], ptr %local, i64 0, i64 4, !dbg !21
%offset_8_bytes = getelementptr inbounds [80 x i8], ptr %local, i64 0, i64 8, !dbg !21
@@ -71,7 +71,7 @@ declare !dbg !26 dso_local void @_Z3escPi(ptr noundef) local_unnamed_addr
define dso_local void @_Z12shortenStartv() local_unnamed_addr #0 !dbg !31 {
entry:
%local2 = alloca [40 x i8], align 16, !DIAssignID !37
- call void @llvm.dbg.assign(metadata i1 undef, metadata !33, metadata !DIExpression(), metadata !37, metadata ptr %local2, metadata !DIExpression()), !dbg !38
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !33, metadata !DIExpression(), metadata !37, metadata ptr %local2, metadata !DIExpression()), !dbg !38
%arraydecay = getelementptr inbounds [40 x i8], ptr %local2, i64 0, i64 0, !dbg !40
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(40) %local2, i8 0, i64 36, i1 false), !dbg !40, !DIAssignID !41
call void @llvm.dbg.assign(metadata i8 0, metadata !33, metadata !DIExpression(), metadata !41, metadata ptr %local2, metadata !DIExpression()), !dbg !38
diff --git a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten.ll b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten.ll
index 0770629..1d5acc2 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/dse/shorten.ll
@@ -28,12 +28,12 @@
; CHECK: @_Z10shortenEndv
; CHECK: call void @llvm.memset{{.*}}, !DIAssignID ![[ID:[0-9]+]]
; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR:[0-9]+]], !DIExpression(DW_OP_LLVM_fragment, 0, 192), ![[ID:[0-9]+]], ptr %local, !DIExpression(),
-; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR]], !DIExpression(DW_OP_LLVM_fragment, 128, 64), ![[UniqueID1:[0-9]+]], ptr undef, !DIExpression(),
+; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR]], !DIExpression(DW_OP_LLVM_fragment, 128, 64), ![[UniqueID1:[0-9]+]], ptr poison, !DIExpression(),
; CHECK: @_Z12shortenStartv
; CHECK: call void @llvm.memset{{.*}}, !DIAssignID ![[ID2:[0-9]+]]
; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2:[0-9]+]], !DIExpression(), ![[ID2]], ptr %local2, !DIExpression(),
-; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2]], !DIExpression(DW_OP_LLVM_fragment, 0, 128), ![[UniqueID2:[0-9]+]], ptr undef, !DIExpression(),
+; CHECK-NEXT: #dbg_assign(i8 0, ![[VAR2]], !DIExpression(DW_OP_LLVM_fragment, 0, 128), ![[UniqueID2:[0-9]+]], ptr poison, !DIExpression(),
; CHECK-DAG: ![[ID]] = distinct !DIAssignID()
; CHECK-DAG: ![[UniqueID1]] = distinct !DIAssignID()
@@ -42,14 +42,14 @@
define dso_local void @_Z10shortenEndv() local_unnamed_addr #0 !dbg !7 {
entry:
%local = alloca [20 x i32], align 16, !DIAssignID !16
- call void @llvm.dbg.assign(metadata i1 undef, metadata !11, metadata !DIExpression(), metadata !16, metadata ptr %local, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !11, metadata !DIExpression(), metadata !16, metadata ptr %local, metadata !DIExpression()), !dbg !17
call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %local) #5, !dbg !18
%arraydecay = getelementptr inbounds [20 x i32], ptr %local, i64 0, i64 0, !dbg !19
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(24) %local, i8 0, i64 24, i1 false), !dbg !19, !DIAssignID !20
call void @llvm.dbg.assign(metadata i8 0, metadata !11, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 192), metadata !20, metadata ptr %local, metadata !DIExpression()), !dbg !17
%add.ptr = getelementptr inbounds [20 x i32], ptr %local, i64 0, i64 4, !dbg !21
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(40) %add.ptr, i8 8, i64 40, i1 false), !dbg !22, !DIAssignID !23
- call void @llvm.dbg.assign(metadata i1 undef, metadata !11, metadata !DIExpression(DW_OP_LLVM_fragment, 128, 320), metadata !23, metadata ptr %add.ptr, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !11, metadata !DIExpression(DW_OP_LLVM_fragment, 128, 320), metadata !23, metadata ptr %add.ptr, metadata !DIExpression()), !dbg !17
call void @_Z3escPi(ptr noundef nonnull %arraydecay), !dbg !24
call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %local) #5, !dbg !25
ret void, !dbg !25
@@ -63,13 +63,13 @@ declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
define dso_local void @_Z12shortenStartv() local_unnamed_addr #0 !dbg !31 {
entry:
%local2 = alloca [10 x i32], align 16, !DIAssignID !37
- call void @llvm.dbg.assign(metadata i1 undef, metadata !33, metadata !DIExpression(), metadata !37, metadata ptr %local2, metadata !DIExpression()), !dbg !38
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !33, metadata !DIExpression(), metadata !37, metadata ptr %local2, metadata !DIExpression()), !dbg !38
call void @llvm.lifetime.start.p0(i64 40, ptr nonnull %local2) #5, !dbg !39
%arraydecay = getelementptr inbounds [10 x i32], ptr %local2, i64 0, i64 0, !dbg !40
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(40) %local2, i8 0, i64 40, i1 false), !dbg !40, !DIAssignID !41
call void @llvm.dbg.assign(metadata i8 0, metadata !33, metadata !DIExpression(), metadata !41, metadata ptr %local2, metadata !DIExpression()), !dbg !38
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) %local2, i8 8, i64 16, i1 false), !dbg !42, !DIAssignID !43
- call void @llvm.dbg.assign(metadata i1 undef, metadata !33, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 128), metadata !43, metadata ptr %local2, metadata !DIExpression()), !dbg !38
+ call void @llvm.dbg.assign(metadata i1 poison, metadata !33, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 128), metadata !43, metadata ptr %local2, metadata !DIExpression()), !dbg !38
call void @_Z3escPi(ptr noundef nonnull %arraydecay), !dbg !44
call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %local2) #5, !dbg !45
ret void, !dbg !45
diff --git a/llvm/test/DebugInfo/Generic/assignment-tracking/salvage-value.ll b/llvm/test/DebugInfo/Generic/assignment-tracking/salvage-value.ll
index e3eb039..29edc38 100644
--- a/llvm/test/DebugInfo/Generic/assignment-tracking/salvage-value.ll
+++ b/llvm/test/DebugInfo/Generic/assignment-tracking/salvage-value.ll
@@ -32,8 +32,8 @@ entry:
%arrayidx2 = getelementptr inbounds i32, ptr %p, i32 %x
call void @llvm.dbg.assign(metadata i32 %x, metadata !34, metadata !DIExpression(), metadata !19, metadata ptr %arrayidx2, metadata !DIExpression()), !dbg !16
;; Variadic DIExpressions for dbg.assign address component is not supported -
-;; set undef.
-; CHECK-NEXT: #dbg_assign(i32 %x,{{.+}}, !DIExpression(),{{.+}}, ptr undef, !DIExpression(),
+;; set poison.
+; CHECK-NEXT: #dbg_assign(i32 %x,{{.+}}, !DIExpression(),{{.+}}, ptr poison, !DIExpression(),
ret void
}
diff --git a/llvm/test/DebugInfo/Generic/sroa-extract-bits.ll b/llvm/test/DebugInfo/Generic/sroa-extract-bits.ll
index fe41b3e6..f47e495 100644
--- a/llvm/test/DebugInfo/Generic/sroa-extract-bits.ll
+++ b/llvm/test/DebugInfo/Generic/sroa-extract-bits.ll
@@ -127,14 +127,14 @@ define i8 @test6(i32 %arg1, i8 %arg2) {
; CHECK-SAME: i32 [[ARG1:%.*]], i8 [[ARG2:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTR_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[ARG1]] to i8
-; CHECK-NEXT: #dbg_value(i8 undef, [[META2]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
+; CHECK-NEXT: #dbg_value(i8 poison, [[META2]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
; CHECK-NEXT: [[PTR_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[ARG1]], 8
; CHECK-NEXT: [[PTR_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[PTR_SROA_2_0_EXTRACT_SHIFT]] to i16
-; CHECK-NEXT: #dbg_value(i16 undef, [[META9]], !DIExpression(DW_OP_LLVM_extract_bits_zext, 0, 16), [[META7]])
+; CHECK-NEXT: #dbg_value(i16 poison, [[META9]], !DIExpression(DW_OP_LLVM_extract_bits_zext, 0, 16), [[META7]])
; CHECK-NEXT: [[PTR_SROA_21_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[ARG1]], 24
; CHECK-NEXT: [[PTR_SROA_21_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[PTR_SROA_21_0_EXTRACT_SHIFT]] to i8
-; CHECK-NEXT: #dbg_value(i8 undef, [[META8]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
-; CHECK-NEXT: #dbg_value(i8 undef, [[META8]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
+; CHECK-NEXT: #dbg_value(i8 poison, [[META8]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
+; CHECK-NEXT: #dbg_value(i8 poison, [[META8]], !DIExpression(DW_OP_LLVM_extract_bits_sext, 0, 8), [[META7]])
; CHECK-NEXT: ret i8 [[PTR_SROA_0_0_EXTRACT_TRUNC]]
;
entry:
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_relocations.s b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_relocations.s
index 75e367b..aef96f0 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_relocations.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_relocations.s
@@ -264,6 +264,20 @@ test_ld64_gotlo12_external:
ldr x0, [x0, :got_lo12:external_data]
.size test_ld64_gotlo12_external, .-test_ld64_gotlo12_external
+# Check R_AARCH64_LD64_GOTPAGE_LO15 handling with a reference to an external
+# symbol. Validate the reference to the GOT entry.
+# For the LDR :gotpage_lo15: instruction we have the 15-bit offset of the GOT
+# entry from the page containing the GOT.
+# jitlink-check: decode_operand(test_ld64_gotpagelo15_external, 2) = \
+# jitlink-check: (got_addr(elf_reloc.o, external_data) - \
+# jitlink-check: (section_addr(elf_reloc.o, $__GOT) & 0xfffffffffffff000)) \
+# jitlink-check: [15:3]
+ .globl test_ld64_gotpagelo15_external
+ .p2align 2
+test_ld64_gotpagelo15_external:
+ ldr x0, [x0, :gotpage_lo15:external_data]
+ .size test_ld64_gotpagelo15_external, .-test_ld64_gotpagelo15_external
+
# Check R_AARCH64_TSTBR14 for tbz
#
# jitlink-check: decode_operand(test_tstbr14_tbz, 2) = \
diff --git a/llvm/test/MC/AArch64/SME2/bfclamp-diagnostics.s b/llvm/test/MC/AArch64/SME2/bfclamp-diagnostics.s
index 661cfad..5d40bb0 100644
--- a/llvm/test/MC/AArch64/SME2/bfclamp-diagnostics.s
+++ b/llvm/test/MC/AArch64/SME2/bfclamp-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector list
diff --git a/llvm/test/MC/AArch64/SME2/bfclamp.s b/llvm/test/MC/AArch64/SME2/bfclamp.s
index dc3caec..3ce9ed6 100644
--- a/llvm/test/MC/AArch64/SME2/bfclamp.s
+++ b/llvm/test/MC/AArch64/SME2/bfclamp.s
@@ -1,60 +1,60 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | llvm-objdump -d --mattr=-sme2 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfclamp {z0.h, z1.h}, z0.h, z0.h // 11000001-00100000-11000000-00000000
// CHECK-INST: bfclamp { z0.h, z1.h }, z0.h, z0.h
// CHECK-ENCODING: [0x00,0xc0,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120c000 <unknown>
bfclamp {z20.h, z21.h}, z10.h, z21.h // 11000001-00110101-11000001-01010100
// CHECK-INST: bfclamp { z20.h, z21.h }, z10.h, z21.h
// CHECK-ENCODING: [0x54,0xc1,0x35,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c135c154 <unknown>
bfclamp {z22.h, z23.h}, z13.h, z8.h // 11000001-00101000-11000001-10110110
// CHECK-INST: bfclamp { z22.h, z23.h }, z13.h, z8.h
// CHECK-ENCODING: [0xb6,0xc1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128c1b6 <unknown>
bfclamp {z30.h, z31.h}, z31.h, z31.h // 11000001-00111111-11000011-11111110
// CHECK-INST: bfclamp { z30.h, z31.h }, z31.h, z31.h
// CHECK-ENCODING: [0xfe,0xc3,0x3f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13fc3fe <unknown>
bfclamp {z0.h - z3.h}, z0.h, z0.h // 11000001-00100000-11001000-00000000
// CHECK-INST: bfclamp { z0.h - z3.h }, z0.h, z0.h
// CHECK-ENCODING: [0x00,0xc8,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120c800 <unknown>
bfclamp {z20.h - z23.h}, z10.h, z21.h // 11000001-00110101-11001001-01010100
// CHECK-INST: bfclamp { z20.h - z23.h }, z10.h, z21.h
// CHECK-ENCODING: [0x54,0xc9,0x35,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c135c954 <unknown>
bfclamp {z20.h - z23.h}, z13.h, z8.h // 11000001-00101000-11001001-10110100
// CHECK-INST: bfclamp { z20.h - z23.h }, z13.h, z8.h
// CHECK-ENCODING: [0xb4,0xc9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128c9b4 <unknown>
bfclamp {z28.h - z31.h}, z31.h, z31.h // 11000001-00111111-11001011-11111100
// CHECK-INST: bfclamp { z28.h - z31.h }, z31.h, z31.h
// CHECK-ENCODING: [0xfc,0xcb,0x3f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13fcbfc <unknown>
diff --git a/llvm/test/MC/AArch64/SME2/bfmax-diagnostics.s b/llvm/test/MC/AArch64/SME2/bfmax-diagnostics.s
index bbb619e..d0718e6 100644
--- a/llvm/test/MC/AArch64/SME2/bfmax-diagnostics.s
+++ b/llvm/test/MC/AArch64/SME2/bfmax-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector list
diff --git a/llvm/test/MC/AArch64/SME2/bfmax.s b/llvm/test/MC/AArch64/SME2/bfmax.s
index 657fcbc..78656a7 100644
--- a/llvm/test/MC/AArch64/SME2/bfmax.s
+++ b/llvm/test/MC/AArch64/SME2/bfmax.s
@@ -1,108 +1,108 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | llvm-objdump -d --mattr=-sme2 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfmax {z0.h, z1.h}, {z0.h, z1.h}, z0.h // 11000001-00100000-10100001-00000000
// CHECK-INST: bfmax { z0.h, z1.h }, { z0.h, z1.h }, z0.h
// CHECK-ENCODING: [0x00,0xa1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a100 <unknown>
bfmax {z20.h, z21.h}, {z20.h, z21.h}, z5.h // 11000001-00100101-10100001-00010100
// CHECK-INST: bfmax { z20.h, z21.h }, { z20.h, z21.h }, z5.h
// CHECK-ENCODING: [0x14,0xa1,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a114 <unknown>
bfmax {z22.h, z23.h}, {z22.h, z23.h}, z8.h // 11000001-00101000-10100001-00010110
// CHECK-INST: bfmax { z22.h, z23.h }, { z22.h, z23.h }, z8.h
// CHECK-ENCODING: [0x16,0xa1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a116 <unknown>
bfmax {z30.h, z31.h}, {z30.h, z31.h}, z15.h // 11000001-00101111-10100001-00011110
// CHECK-INST: bfmax { z30.h, z31.h }, { z30.h, z31.h }, z15.h
// CHECK-ENCODING: [0x1e,0xa1,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa11e <unknown>
bfmax {z0.h, z1.h}, {z0.h, z1.h}, {z0.h, z1.h} // 11000001-00100000-10110001-00000000
// CHECK-INST: bfmax { z0.h, z1.h }, { z0.h, z1.h }, { z0.h, z1.h }
// CHECK-ENCODING: [0x00,0xb1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b100 <unknown>
bfmax {z20.h, z21.h}, {z20.h, z21.h}, {z20.h, z21.h} // 11000001-00110100-10110001-00010100
// CHECK-INST: bfmax { z20.h, z21.h }, { z20.h, z21.h }, { z20.h, z21.h }
// CHECK-ENCODING: [0x14,0xb1,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b114 <unknown>
bfmax {z22.h, z23.h}, {z22.h, z23.h}, {z8.h, z9.h} // 11000001-00101000-10110001-00010110
// CHECK-INST: bfmax { z22.h, z23.h }, { z22.h, z23.h }, { z8.h, z9.h }
// CHECK-ENCODING: [0x16,0xb1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b116 <unknown>
bfmax {z30.h, z31.h}, {z30.h, z31.h}, {z30.h, z31.h} // 11000001-00111110-10110001-00011110
// CHECK-INST: bfmax { z30.h, z31.h }, { z30.h, z31.h }, { z30.h, z31.h }
// CHECK-ENCODING: [0x1e,0xb1,0x3e,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13eb11e <unknown>
bfmax {z0.h - z3.h}, {z0.h - z3.h}, z0.h // 11000001-00100000-10101001-00000000
// CHECK-INST: bfmax { z0.h - z3.h }, { z0.h - z3.h }, z0.h
// CHECK-ENCODING: [0x00,0xa9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a900 <unknown>
bfmax {z20.h - z23.h}, {z20.h - z23.h}, z5.h // 11000001-00100101-10101001-00010100
// CHECK-INST: bfmax { z20.h - z23.h }, { z20.h - z23.h }, z5.h
// CHECK-ENCODING: [0x14,0xa9,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a914 <unknown>
bfmax {z20.h - z23.h}, {z20.h - z23.h}, z8.h // 11000001-00101000-10101001-00010100
// CHECK-INST: bfmax { z20.h - z23.h }, { z20.h - z23.h }, z8.h
// CHECK-ENCODING: [0x14,0xa9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a914 <unknown>
bfmax {z28.h - z31.h}, {z28.h - z31.h}, z15.h // 11000001-00101111-10101001-00011100
// CHECK-INST: bfmax { z28.h - z31.h }, { z28.h - z31.h }, z15.h
// CHECK-ENCODING: [0x1c,0xa9,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa91c <unknown>
bfmax {z0.h - z3.h}, {z0.h - z3.h}, {z0.h - z3.h} // 11000001-00100000-10111001-00000000
// CHECK-INST: bfmax { z0.h - z3.h }, { z0.h - z3.h }, { z0.h - z3.h }
// CHECK-ENCODING: [0x00,0xb9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b900 <unknown>
bfmax {z20.h - z23.h}, {z20.h - z23.h}, {z20.h - z23.h} // 11000001-00110100-10111001-00010100
// CHECK-INST: bfmax { z20.h - z23.h }, { z20.h - z23.h }, { z20.h - z23.h }
// CHECK-ENCODING: [0x14,0xb9,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b914 <unknown>
bfmax {z20.h - z23.h}, {z20.h - z23.h}, {z8.h - z11.h} // 11000001-00101000-10111001-00010100
// CHECK-INST: bfmax { z20.h - z23.h }, { z20.h - z23.h }, { z8.h - z11.h }
// CHECK-ENCODING: [0x14,0xb9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b914 <unknown>
bfmax {z28.h - z31.h}, {z28.h - z31.h}, {z28.h - z31.h} // 11000001-00111100-10111001-00011100
// CHECK-INST: bfmax { z28.h - z31.h }, { z28.h - z31.h }, { z28.h - z31.h }
// CHECK-ENCODING: [0x1c,0xb9,0x3c,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13cb91c <unknown>
diff --git a/llvm/test/MC/AArch64/SME2/bfmaxnm-diagnostics.s b/llvm/test/MC/AArch64/SME2/bfmaxnm-diagnostics.s
index ab837b6..39a2740 100644
--- a/llvm/test/MC/AArch64/SME2/bfmaxnm-diagnostics.s
+++ b/llvm/test/MC/AArch64/SME2/bfmaxnm-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector list
diff --git a/llvm/test/MC/AArch64/SME2/bfmaxnm.s b/llvm/test/MC/AArch64/SME2/bfmaxnm.s
index f61f530..860da4d 100644
--- a/llvm/test/MC/AArch64/SME2/bfmaxnm.s
+++ b/llvm/test/MC/AArch64/SME2/bfmaxnm.s
@@ -1,108 +1,108 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | llvm-objdump -d --mattr=-sme2 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfmaxnm {z0.h, z1.h}, {z0.h, z1.h}, z0.h // 11000001-00100000-10100001-00100000
// CHECK-INST: bfmaxnm { z0.h, z1.h }, { z0.h, z1.h }, z0.h
// CHECK-ENCODING: [0x20,0xa1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a120 <unknown>
bfmaxnm {z20.h, z21.h}, {z20.h, z21.h}, z5.h // 11000001-00100101-10100001-00110100
// CHECK-INST: bfmaxnm { z20.h, z21.h }, { z20.h, z21.h }, z5.h
// CHECK-ENCODING: [0x34,0xa1,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a134 <unknown>
bfmaxnm {z22.h, z23.h}, {z22.h, z23.h}, z8.h // 11000001-00101000-10100001-00110110
// CHECK-INST: bfmaxnm { z22.h, z23.h }, { z22.h, z23.h }, z8.h
// CHECK-ENCODING: [0x36,0xa1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a136 <unknown>
bfmaxnm {z30.h, z31.h}, {z30.h, z31.h}, z15.h // 11000001-00101111-10100001-00111110
// CHECK-INST: bfmaxnm { z30.h, z31.h }, { z30.h, z31.h }, z15.h
// CHECK-ENCODING: [0x3e,0xa1,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa13e <unknown>
bfmaxnm {z0.h, z1.h}, {z0.h, z1.h}, {z0.h, z1.h} // 11000001-00100000-10110001-00100000
// CHECK-INST: bfmaxnm { z0.h, z1.h }, { z0.h, z1.h }, { z0.h, z1.h }
// CHECK-ENCODING: [0x20,0xb1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b120 <unknown>
bfmaxnm {z20.h, z21.h}, {z20.h, z21.h}, {z20.h, z21.h} // 11000001-00110100-10110001-00110100
// CHECK-INST: bfmaxnm { z20.h, z21.h }, { z20.h, z21.h }, { z20.h, z21.h }
// CHECK-ENCODING: [0x34,0xb1,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b134 <unknown>
bfmaxnm {z22.h, z23.h}, {z22.h, z23.h}, {z8.h, z9.h} // 11000001-00101000-10110001-00110110
// CHECK-INST: bfmaxnm { z22.h, z23.h }, { z22.h, z23.h }, { z8.h, z9.h }
// CHECK-ENCODING: [0x36,0xb1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b136 <unknown>
bfmaxnm {z30.h, z31.h}, {z30.h, z31.h}, {z30.h, z31.h} // 11000001-00111110-10110001-00111110
// CHECK-INST: bfmaxnm { z30.h, z31.h }, { z30.h, z31.h }, { z30.h, z31.h }
// CHECK-ENCODING: [0x3e,0xb1,0x3e,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13eb13e <unknown>
bfmaxnm {z0.h - z3.h}, {z0.h - z3.h}, z0.h // 11000001-00100000-10101001-00100000
// CHECK-INST: bfmaxnm { z0.h - z3.h }, { z0.h - z3.h }, z0.h
// CHECK-ENCODING: [0x20,0xa9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a920 <unknown>
bfmaxnm {z20.h - z23.h}, {z20.h - z23.h}, z5.h // 11000001-00100101-10101001-00110100
// CHECK-INST: bfmaxnm { z20.h - z23.h }, { z20.h - z23.h }, z5.h
// CHECK-ENCODING: [0x34,0xa9,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a934 <unknown>
bfmaxnm {z20.h - z23.h}, {z20.h - z23.h}, z8.h // 11000001-00101000-10101001-00110100
// CHECK-INST: bfmaxnm { z20.h - z23.h }, { z20.h - z23.h }, z8.h
// CHECK-ENCODING: [0x34,0xa9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a934 <unknown>
bfmaxnm {z28.h - z31.h}, {z28.h - z31.h}, z15.h // 11000001-00101111-10101001-00111100
// CHECK-INST: bfmaxnm { z28.h - z31.h }, { z28.h - z31.h }, z15.h
// CHECK-ENCODING: [0x3c,0xa9,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa93c <unknown>
bfmaxnm {z0.h - z3.h}, {z0.h - z3.h}, {z0.h - z3.h} // 11000001-00100000-10111001-00100000
// CHECK-INST: bfmaxnm { z0.h - z3.h }, { z0.h - z3.h }, { z0.h - z3.h }
// CHECK-ENCODING: [0x20,0xb9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b920 <unknown>
bfmaxnm {z20.h - z23.h}, {z20.h - z23.h}, {z20.h - z23.h} // 11000001-00110100-10111001-00110100
// CHECK-INST: bfmaxnm { z20.h - z23.h }, { z20.h - z23.h }, { z20.h - z23.h }
// CHECK-ENCODING: [0x34,0xb9,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b934 <unknown>
bfmaxnm {z20.h - z23.h}, {z20.h - z23.h}, {z8.h - z11.h} // 11000001-00101000-10111001-00110100
// CHECK-INST: bfmaxnm { z20.h - z23.h }, { z20.h - z23.h }, { z8.h - z11.h }
// CHECK-ENCODING: [0x34,0xb9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b934 <unknown>
bfmaxnm {z28.h - z31.h}, {z28.h - z31.h}, {z28.h - z31.h} // 11000001-00111100-10111001-00111100
// CHECK-INST: bfmaxnm { z28.h - z31.h }, { z28.h - z31.h }, { z28.h - z31.h }
// CHECK-ENCODING: [0x3c,0xb9,0x3c,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13cb93c <unknown>
diff --git a/llvm/test/MC/AArch64/SME2/bfmin-diagnostics.s b/llvm/test/MC/AArch64/SME2/bfmin-diagnostics.s
index 41f1036..3628f53 100644
--- a/llvm/test/MC/AArch64/SME2/bfmin-diagnostics.s
+++ b/llvm/test/MC/AArch64/SME2/bfmin-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector list
diff --git a/llvm/test/MC/AArch64/SME2/bfmin.s b/llvm/test/MC/AArch64/SME2/bfmin.s
index 6612e3c..5cc3fd3 100644
--- a/llvm/test/MC/AArch64/SME2/bfmin.s
+++ b/llvm/test/MC/AArch64/SME2/bfmin.s
@@ -1,108 +1,108 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | llvm-objdump -d --mattr=-sme2 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfmin {z0.h, z1.h}, {z0.h, z1.h}, z0.h // 11000001-00100000-10100001-00000001
// CHECK-INST: bfmin { z0.h, z1.h }, { z0.h, z1.h }, z0.h
// CHECK-ENCODING: [0x01,0xa1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a101 <unknown>
bfmin {z20.h, z21.h}, {z20.h, z21.h}, z5.h // 11000001-00100101-10100001-00010101
// CHECK-INST: bfmin { z20.h, z21.h }, { z20.h, z21.h }, z5.h
// CHECK-ENCODING: [0x15,0xa1,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a115 <unknown>
bfmin {z22.h, z23.h}, {z22.h, z23.h}, z8.h // 11000001-00101000-10100001-00010111
// CHECK-INST: bfmin { z22.h, z23.h }, { z22.h, z23.h }, z8.h
// CHECK-ENCODING: [0x17,0xa1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a117 <unknown>
bfmin {z30.h, z31.h}, {z30.h, z31.h}, z15.h // 11000001-00101111-10100001-00011111
// CHECK-INST: bfmin { z30.h, z31.h }, { z30.h, z31.h }, z15.h
// CHECK-ENCODING: [0x1f,0xa1,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa11f <unknown>
bfmin {z0.h, z1.h}, {z0.h, z1.h}, {z0.h, z1.h} // 11000001-00100000-10110001-00000001
// CHECK-INST: bfmin { z0.h, z1.h }, { z0.h, z1.h }, { z0.h, z1.h }
// CHECK-ENCODING: [0x01,0xb1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b101 <unknown>
bfmin {z20.h, z21.h}, {z20.h, z21.h}, {z20.h, z21.h} // 11000001-00110100-10110001-00010101
// CHECK-INST: bfmin { z20.h, z21.h }, { z20.h, z21.h }, { z20.h, z21.h }
// CHECK-ENCODING: [0x15,0xb1,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b115 <unknown>
bfmin {z22.h, z23.h}, {z22.h, z23.h}, {z8.h, z9.h} // 11000001-00101000-10110001-00010111
// CHECK-INST: bfmin { z22.h, z23.h }, { z22.h, z23.h }, { z8.h, z9.h }
// CHECK-ENCODING: [0x17,0xb1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b117 <unknown>
bfmin {z30.h, z31.h}, {z30.h, z31.h}, {z30.h, z31.h} // 11000001-00111110-10110001-00011111
// CHECK-INST: bfmin { z30.h, z31.h }, { z30.h, z31.h }, { z30.h, z31.h }
// CHECK-ENCODING: [0x1f,0xb1,0x3e,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13eb11f <unknown>
bfmin {z0.h - z3.h}, {z0.h - z3.h}, z0.h // 11000001-00100000-10101001-00000001
// CHECK-INST: bfmin { z0.h - z3.h }, { z0.h - z3.h }, z0.h
// CHECK-ENCODING: [0x01,0xa9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a901 <unknown>
bfmin {z20.h - z23.h}, {z20.h - z23.h}, z5.h // 11000001-00100101-10101001-00010101
// CHECK-INST: bfmin { z20.h - z23.h }, { z20.h - z23.h }, z5.h
// CHECK-ENCODING: [0x15,0xa9,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a915 <unknown>
bfmin {z20.h - z23.h}, {z20.h - z23.h}, z8.h // 11000001-00101000-10101001-00010101
// CHECK-INST: bfmin { z20.h - z23.h }, { z20.h - z23.h }, z8.h
// CHECK-ENCODING: [0x15,0xa9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a915 <unknown>
bfmin {z28.h - z31.h}, {z28.h - z31.h}, z15.h // 11000001-00101111-10101001-00011101
// CHECK-INST: bfmin { z28.h - z31.h }, { z28.h - z31.h }, z15.h
// CHECK-ENCODING: [0x1d,0xa9,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa91d <unknown>
bfmin {z0.h - z3.h}, {z0.h - z3.h}, {z0.h - z3.h} // 11000001-00100000-10111001-00000001
// CHECK-INST: bfmin { z0.h - z3.h }, { z0.h - z3.h }, { z0.h - z3.h }
// CHECK-ENCODING: [0x01,0xb9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b901 <unknown>
bfmin {z20.h - z23.h}, {z20.h - z23.h}, {z20.h - z23.h} // 11000001-00110100-10111001-00010101
// CHECK-INST: bfmin { z20.h - z23.h }, { z20.h - z23.h }, { z20.h - z23.h }
// CHECK-ENCODING: [0x15,0xb9,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b915 <unknown>
bfmin {z20.h - z23.h}, {z20.h - z23.h}, {z8.h - z11.h} // 11000001-00101000-10111001-00010101
// CHECK-INST: bfmin { z20.h - z23.h }, { z20.h - z23.h }, { z8.h - z11.h }
// CHECK-ENCODING: [0x15,0xb9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b915 <unknown>
bfmin {z28.h - z31.h}, {z28.h - z31.h}, {z28.h - z31.h} // 11000001-00111100-10111001-00011101
// CHECK-INST: bfmin { z28.h - z31.h }, { z28.h - z31.h }, { z28.h - z31.h }
// CHECK-ENCODING: [0x1d,0xb9,0x3c,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13cb91d <unknown>
diff --git a/llvm/test/MC/AArch64/SME2/bfminnm-diagnostics.s b/llvm/test/MC/AArch64/SME2/bfminnm-diagnostics.s
index 14485e9..25a1bf5 100644
--- a/llvm/test/MC/AArch64/SME2/bfminnm-diagnostics.s
+++ b/llvm/test/MC/AArch64/SME2/bfminnm-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector list
diff --git a/llvm/test/MC/AArch64/SME2/bfminnm.s b/llvm/test/MC/AArch64/SME2/bfminnm.s
index 4a48a0d..f035580 100644
--- a/llvm/test/MC/AArch64/SME2/bfminnm.s
+++ b/llvm/test/MC/AArch64/SME2/bfminnm.s
@@ -1,113 +1,113 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sme2 --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | llvm-objdump -d --mattr=-sme2 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfminnm {z0.h, z1.h}, {z0.h, z1.h}, z0.h // 11000001-00100000-10100001-00100001
// CHECK-INST: bfminnm { z0.h, z1.h }, { z0.h, z1.h }, z0.h
// CHECK-ENCODING: [0x21,0xa1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a121 <unknown>
bfminnm {z20.h, z21.h}, {z20.h, z21.h}, z5.h // 11000001-00100101-10100001-00110101
// CHECK-INST: bfminnm { z20.h, z21.h }, { z20.h, z21.h }, z5.h
// CHECK-ENCODING: [0x35,0xa1,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a135 <unknown>
bfminnm {z22.h, z23.h}, {z22.h, z23.h}, z8.h // 11000001-00101000-10100001-00110111
// CHECK-INST: bfminnm { z22.h, z23.h }, { z22.h, z23.h }, z8.h
// CHECK-ENCODING: [0x37,0xa1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a137 <unknown>
bfminnm {z30.h, z31.h}, {z30.h, z31.h}, z15.h // 11000001-00101111-10100001-00111111
// CHECK-INST: bfminnm { z30.h, z31.h }, { z30.h, z31.h }, z15.h
// CHECK-ENCODING: [0x3f,0xa1,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa13f <unknown>
bfminnm {z0.h, z1.h}, {z0.h, z1.h}, {z0.h, z1.h} // 11000001-00100000-10110001-00100001
// CHECK-INST: bfminnm { z0.h, z1.h }, { z0.h, z1.h }, { z0.h, z1.h }
// CHECK-ENCODING: [0x21,0xb1,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b121 <unknown>
bfminnm {z20.h, z21.h}, {z20.h, z21.h}, {z20.h, z21.h} // 11000001-00110100-10110001-00110101
// CHECK-INST: bfminnm { z20.h, z21.h }, { z20.h, z21.h }, { z20.h, z21.h }
// CHECK-ENCODING: [0x35,0xb1,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b135 <unknown>
bfminnm {z22.h, z23.h}, {z22.h, z23.h}, {z8.h, z9.h} // 11000001-00101000-10110001-00110111
// CHECK-INST: bfminnm { z22.h, z23.h }, { z22.h, z23.h }, { z8.h, z9.h }
// CHECK-ENCODING: [0x37,0xb1,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b137 <unknown>
bfminnm {z30.h, z31.h}, {z30.h, z31.h}, {z30.h, z31.h} // 11000001-00111110-10110001-00111111
// CHECK-INST: bfminnm { z30.h, z31.h }, { z30.h, z31.h }, { z30.h, z31.h }
// CHECK-ENCODING: [0x3f,0xb1,0x3e,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13eb13f <unknown>
bfminnm {z0.h - z3.h}, {z0.h - z3.h}, z0.h // 11000001-00100000-10101001-00100001
// CHECK-INST: bfminnm { z0.h - z3.h }, { z0.h - z3.h }, z0.h
// CHECK-ENCODING: [0x21,0xa9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120a921 <unknown>
bfminnm {z20.h - z23.h}, {z20.h - z23.h}, z5.h // 11000001-00100101-10101001-00110101
// CHECK-INST: bfminnm { z20.h - z23.h }, { z20.h - z23.h }, z5.h
// CHECK-ENCODING: [0x35,0xa9,0x25,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c125a935 <unknown>
bfminnm {z20.h - z23.h}, {z20.h - z23.h}, z8.h // 11000001-00101000-10101001-00110101
// CHECK-INST: bfminnm { z20.h - z23.h }, { z20.h - z23.h }, z8.h
// CHECK-ENCODING: [0x35,0xa9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128a935 <unknown>
bfminnm {z28.h - z31.h}, {z28.h - z31.h}, z15.h // 11000001-00101111-10101001-00111101
// CHECK-INST: bfminnm { z28.h - z31.h }, { z28.h - z31.h }, z15.h
// CHECK-ENCODING: [0x3d,0xa9,0x2f,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c12fa93d <unknown>
bfminnm {z0.h - z3.h}, {z0.h - z3.h}, {z0.h - z3.h} // 11000001-00100000-10111001-00100001
// CHECK-INST: bfminnm { z0.h - z3.h }, { z0.h - z3.h }, { z0.h - z3.h }
// CHECK-ENCODING: [0x21,0xb9,0x20,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c120b921 <unknown>
bfminnm {z20.h - z23.h}, {z20.h - z23.h}, {z20.h - z23.h} // 11000001-00110100-10111001-00110101
// CHECK-INST: bfminnm { z20.h - z23.h }, { z20.h - z23.h }, { z20.h - z23.h }
// CHECK-ENCODING: [0x35,0xb9,0x34,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c134b935 <unknown>
bfminnm {z20.h - z23.h}, {z20.h - z23.h}, {z8.h - z11.h} // 11000001-00101000-10111001-00110101
// CHECK-INST: bfminnm { z20.h - z23.h }, { z20.h - z23.h }, { z8.h - z11.h }
// CHECK-ENCODING: [0x35,0xb9,0x28,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c128b935 <unknown>
bfminnm {z28.h - z31.h}, {z28.h - z31.h}, {z28.h - z31.h} // 11000001-00111100-10111001-00111101
// CHECK-INST: bfminnm { z28.h - z31.h }, { z28.h - z31.h }, { z28.h - z31.h }
// CHECK-ENCODING: [0x3d,0xb9,0x3c,0xc1]
-// CHECK-ERROR: instruction requires: b16b16 sme2
+// CHECK-ERROR: instruction requires: sme2 sve-b16b16
// CHECK-UNKNOWN: c13cb93d <unknown>
diff --git a/llvm/test/MC/AArch64/SME2p1/directive-arch-negative.s b/llvm/test/MC/AArch64/SME2p1/directive-arch-negative.s
index 948d8f9..17d239b 100644
--- a/llvm/test/MC/AArch64/SME2p1/directive-arch-negative.s
+++ b/llvm/test/MC/AArch64/SME2p1/directive-arch-negative.s
@@ -5,3 +5,9 @@
sqcvt z0.h, {z0.s, z1.s}
// CHECK: error: instruction requires: sme2
// CHECK: sqcvt z0.h, {z0.s, z1.s}
+
+.arch armv9-a+sme2+sve-b16b16
+.arch armv9-a+sme2+nosve-b16b16
+bfclamp { z0.h, z1.h }, z0.h, z0.h
+// CHECK: error: instruction requires: sve-b16b16
+// CHECK: bfclamp { z0.h, z1.h }, z0.h, z0.h
diff --git a/llvm/test/MC/AArch64/SME2p1/directive-arch.s b/llvm/test/MC/AArch64/SME2p1/directive-arch.s
index 112de25..085a38c 100644
--- a/llvm/test/MC/AArch64/SME2p1/directive-arch.s
+++ b/llvm/test/MC/AArch64/SME2p1/directive-arch.s
@@ -5,4 +5,6 @@
sqcvt z0.h, {z0.s, z1.s}
// CHECK: sqcvt z0.h, { z0.s, z1.s }
-.arch armv9-a+nosme2p1
+.arch armv9-a+sme2+sve-b16b16
+bfclamp { z0.h, z1.h }, z0.h, z0.h
+// CHECK: bfclamp { z0.h, z1.h }, z0.h, z0.h
diff --git a/llvm/test/MC/AArch64/SME2p1/directive-arch_extension-negative.s b/llvm/test/MC/AArch64/SME2p1/directive-arch_extension-negative.s
index 19f11b8..6a7301c 100644
--- a/llvm/test/MC/AArch64/SME2p1/directive-arch_extension-negative.s
+++ b/llvm/test/MC/AArch64/SME2p1/directive-arch_extension-negative.s
@@ -5,3 +5,10 @@
sqcvt z0.h, { z0.s, z1.s }
// CHECK: error: instruction requires: sme2
// CHECK: sqcvt z0.h
+
+.arch_extension sme2
+.arch_extension sve-b16b16
+.arch_extension nosve-b16b16
+bfclamp { z0.h, z1.h }, z0.h, z0.h
+// CHECK: error: instruction requires: sve-b16b16
+// CHECK: bfclamp { z0.h, z1.h }, z0.h, z0.h \ No newline at end of file
diff --git a/llvm/test/MC/AArch64/SME2p1/directive-arch_extension.s b/llvm/test/MC/AArch64/SME2p1/directive-arch_extension.s
index 653956d..0e6e2e7 100644
--- a/llvm/test/MC/AArch64/SME2p1/directive-arch_extension.s
+++ b/llvm/test/MC/AArch64/SME2p1/directive-arch_extension.s
@@ -3,3 +3,8 @@
.arch_extension sme2p1
sqcvt z0.h, { z0.s, z1.s }
// CHECK: sqcvt z0.h, { z0.s, z1.s }
+
+.arch_extension sme2
+.arch_extension sve-b16b16
+bfclamp { z0.h, z1.h }, z0.h, z0.h
+// CHECK: bfclamp { z0.h, z1.h }, z0.h, z0.h \ No newline at end of file
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfadd-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfadd-diagnostics.s
index 1ead9d2..b34e57e 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfadd-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfadd-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfadd.s b/llvm/test/MC/AArch64/SVE2p1/bfadd.s
index b0d6733..ae0086e 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfadd.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfadd.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfadd z23.h, p3/m, z23.h, z13.h // 01100101-00000000-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfadd z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65008db7 <unknown>
movprfx z23, z31
@@ -35,53 +35,53 @@ bfadd z23.h, p3/m, z23.h, z13.h // 01100101-00000000-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfadd z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65008db7 <unknown>
bfadd z0.h, p0/m, z0.h, z0.h // 01100101-00000000-10000000-00000000
// CHECK-INST: bfadd z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65008000 <unknown>
bfadd z21.h, p5/m, z21.h, z10.h // 01100101-00000000-10010101-01010101
// CHECK-INST: bfadd z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65009555 <unknown>
bfadd z23.h, p3/m, z23.h, z13.h // 01100101-00000000-10001101-10110111
// CHECK-INST: bfadd z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65008db7 <unknown>
bfadd z31.h, p7/m, z31.h, z31.h // 01100101-00000000-10011111-11111111
// CHECK-INST: bfadd z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65009fff <unknown>
bfadd z0.h, z0.h, z0.h // 01100101-00000000-00000000-00000000
// CHECK-INST: bfadd z0.h, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x00,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65000000 <unknown>
bfadd z21.h, z10.h, z21.h // 01100101-00010101-00000001-01010101
// CHECK-INST: bfadd z21.h, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x01,0x15,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65150155 <unknown>
bfadd z23.h, z13.h, z8.h // 01100101-00001000-00000001-10110111
// CHECK-INST: bfadd z23.h, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x01,0x08,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 650801b7 <unknown>
bfadd z31.h, z31.h, z31.h // 01100101-00011111-00000011-11111111
// CHECK-INST: bfadd z31.h, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x03,0x1f,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 651f03ff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfclamp-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfclamp-diagnostics.s
index b18108f..db9ce2d 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfclamp-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfclamp-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector suffix
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfclamp.s b/llvm/test/MC/AArch64/SVE2p1/bfclamp.s
index 93b0a3e..2a88241 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfclamp.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfclamp.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23, z31
@@ -27,30 +27,30 @@ bfclamp z23.h, z13.h, z8.h // 01100100-00101000-00100101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfclamp z23.h, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x25,0x28,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 642825b7 <unknown>
bfclamp z0.h, z0.h, z0.h // 01100100-00100000-00100100-00000000
// CHECK-INST: bfclamp z0.h, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x24,0x20,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64202400 <unknown>
bfclamp z21.h, z10.h, z21.h // 01100100-00110101-00100101-01010101
// CHECK-INST: bfclamp z21.h, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x25,0x35,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64352555 <unknown>
bfclamp z23.h, z13.h, z8.h // 01100100-00101000-00100101-10110111
// CHECK-INST: bfclamp z23.h, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x25,0x28,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 642825b7 <unknown>
bfclamp z31.h, z31.h, z31.h // 01100100-00111111-00100111-11111111
// CHECK-INST: bfclamp z31.h, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x27,0x3f,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 643f27ff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmax-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmax-diagnostics.s
index f7e3071..da3e7f8 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmax-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmax-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmax.s b/llvm/test/MC/AArch64/SVE2p1/bfmax.s
index e6089f5..607a3c6 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmax.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmax.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfmax z23.h, p3/m, z23.h, z13.h // 01100101-00000110-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmax z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65068db7 <unknown>
movprfx z23, z31
@@ -35,29 +35,29 @@ bfmax z23.h, p3/m, z23.h, z13.h // 01100101-00000110-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmax z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65068db7 <unknown>
bfmax z0.h, p0/m, z0.h, z0.h // 01100101-00000110-10000000-00000000
// CHECK-INST: bfmax z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65068000 <unknown>
bfmax z21.h, p5/m, z21.h, z10.h // 01100101-00000110-10010101-01010101
// CHECK-INST: bfmax z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65069555 <unknown>
bfmax z23.h, p3/m, z23.h, z13.h // 01100101-00000110-10001101-10110111
// CHECK-INST: bfmax z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65068db7 <unknown>
bfmax z31.h, p7/m, z31.h, z31.h // 01100101-00000110-10011111-11111111
// CHECK-INST: bfmax z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x06,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65069fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmaxnm-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmaxnm-diagnostics.s
index 220b66b..16aff60 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmaxnm-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmaxnm-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmaxnm.s b/llvm/test/MC/AArch64/SVE2p1/bfmaxnm.s
index a4b0e9a..248f912 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmaxnm.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmaxnm.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfmaxnm z23.h, p3/m, z23.h, z13.h // 01100101-00000100-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmaxnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65048db7 <unknown>
movprfx z23, z31
@@ -35,30 +35,30 @@ bfmaxnm z23.h, p3/m, z23.h, z13.h // 01100101-00000100-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmaxnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65048db7 <unknown>
bfmaxnm z0.h, p0/m, z0.h, z0.h // 01100101-00000100-10000000-00000000
// CHECK-INST: bfmaxnm z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65048000 <unknown>
bfmaxnm z21.h, p5/m, z21.h, z10.h // 01100101-00000100-10010101-01010101
// CHECK-INST: bfmaxnm z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65049555 <unknown>
bfmaxnm z23.h, p3/m, z23.h, z13.h // 01100101-00000100-10001101-10110111
// CHECK-INST: bfmaxnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65048db7 <unknown>
bfmaxnm z31.h, p7/m, z31.h, z31.h // 01100101-00000100-10011111-11111111
// CHECK-INST: bfmaxnm z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x04,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65049fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmin-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmin-diagnostics.s
index a7f8be2..c681eff 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmin-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmin-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmin.s b/llvm/test/MC/AArch64/SVE2p1/bfmin.s
index 2475143..0e536c5 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmin.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmin.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfmin z23.h, p3/m, z23.h, z13.h // 01100101-00000111-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmin z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65078db7 <unknown>
movprfx z23, z31
@@ -35,30 +35,30 @@ bfmin z23.h, p3/m, z23.h, z13.h // 01100101-00000111-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmin z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65078db7 <unknown>
bfmin z0.h, p0/m, z0.h, z0.h // 01100101-00000111-10000000-00000000
// CHECK-INST: bfmin z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65078000 <unknown>
bfmin z21.h, p5/m, z21.h, z10.h // 01100101-00000111-10010101-01010101
// CHECK-INST: bfmin z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65079555 <unknown>
bfmin z23.h, p3/m, z23.h, z13.h // 01100101-00000111-10001101-10110111
// CHECK-INST: bfmin z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65078db7 <unknown>
bfmin z31.h, p7/m, z31.h, z31.h // 01100101-00000111-10011111-11111111
// CHECK-INST: bfmin z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x07,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65079fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfminnm-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfminnm-diagnostics.s
index 68c4211..efce929 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfminnm-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfminnm-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfminnm.s b/llvm/test/MC/AArch64/SVE2p1/bfminnm.s
index 6c4c9e4..785559d 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfminnm.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfminnm.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfminnm z23.h, p3/m, z23.h, z13.h // 01100101-00000101-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfminnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65058db7 <unknown>
movprfx z23, z31
@@ -35,30 +35,30 @@ bfminnm z23.h, p3/m, z23.h, z13.h // 01100101-00000101-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfminnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65058db7 <unknown>
bfminnm z0.h, p0/m, z0.h, z0.h // 01100101-00000101-10000000-00000000
// CHECK-INST: bfminnm z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65058000 <unknown>
bfminnm z21.h, p5/m, z21.h, z10.h // 01100101-00000101-10010101-01010101
// CHECK-INST: bfminnm z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65059555 <unknown>
bfminnm z23.h, p3/m, z23.h, z13.h // 01100101-00000101-10001101-10110111
// CHECK-INST: bfminnm z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65058db7 <unknown>
bfminnm z31.h, p7/m, z31.h, z31.h // 01100101-00000101-10011111-11111111
// CHECK-INST: bfminnm z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x05,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65059fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmla-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmla-diagnostics.s
index 035f289..a75a054 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmla-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmla-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector lane index
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmla.s b/llvm/test/MC/AArch64/SVE2p1/bfmla.s
index 1d22cbe..ffe3b66 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmla.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmla.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23, z31
@@ -27,31 +27,31 @@ bfmla z23.h, z13.h, z0.h[5] // 01100100-01101000-00001001-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmla z23.h, z13.h, z0.h[5]
// CHECK-ENCODING: [0xb7,0x09,0x68,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 646809b7 <unknown>
bfmla z0.h, z0.h, z0.h[0] // 01100100-00100000-00001000-00000000
// CHECK-INST: bfmla z0.h, z0.h, z0.h[0]
// CHECK-ENCODING: [0x00,0x08,0x20,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64200800 <unknown>
bfmla z21.h, z10.h, z5.h[6] // 01100100-01110101-00001001-01010101
// CHECK-INST: bfmla z21.h, z10.h, z5.h[6]
// CHECK-ENCODING: [0x55,0x09,0x75,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64750955 <unknown>
bfmla z23.h, z13.h, z0.h[5] // 01100100-01101000-00001001-10110111
// CHECK-INST: bfmla z23.h, z13.h, z0.h[5]
// CHECK-ENCODING: [0xb7,0x09,0x68,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 646809b7 <unknown>
bfmla z31.h, z31.h, z7.h[7] // 01100100-01111111-00001011-11111111
// CHECK-INST: bfmla z31.h, z31.h, z7.h[7]
// CHECK-ENCODING: [0xff,0x0b,0x7f,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 647f0bff <unknown>
@@ -60,7 +60,7 @@ bfmla z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmla z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x0d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65280db7 <unknown>
movprfx z23, z31
@@ -68,30 +68,30 @@ bfmla z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmla z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x0d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65280db7 <unknown>
bfmla z0.h, p0/m, z0.h, z0.h // 01100101-00100000-00000000-00000000
// CHECK-INST: bfmla z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x00,0x20,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65200000 <unknown>
bfmla z21.h, p5/m, z10.h, z21.h // 01100101-00110101-00010101-01010101
// CHECK-INST: bfmla z21.h, p5/m, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x15,0x35,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65351555 <unknown>
bfmla z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00001101-10110111
// CHECK-INST: bfmla z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x0d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65280db7 <unknown>
bfmla z31.h, p7/m, z31.h, z31.h // 01100101-00111111-00011111-11111111
// CHECK-INST: bfmla z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x1f,0x3f,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 653f1fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmls-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmls-diagnostics.s
index cbc7efe..0c3fddc 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmls-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmls-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid vector lane index
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmls.s b/llvm/test/MC/AArch64/SVE2p1/bfmls.s
index 7a27e3d..52af1a8 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmls.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmls.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
@@ -28,31 +28,31 @@ bfmls z23.h, z13.h, z0.h[5] // 01100100-01101000-00001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmls z23.h, z13.h, z0.h[5]
// CHECK-ENCODING: [0xb7,0x0d,0x68,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64680db7 <unknown>
bfmls z0.h, z0.h, z0.h[0] // 01100100-00100000-00001100-00000000
// CHECK-INST: bfmls z0.h, z0.h, z0.h[0]
// CHECK-ENCODING: [0x00,0x0c,0x20,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64200c00 <unknown>
bfmls z21.h, z10.h, z5.h[6] // 01100100-01110101-00001101-01010101
// CHECK-INST: bfmls z21.h, z10.h, z5.h[6]
// CHECK-ENCODING: [0x55,0x0d,0x75,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64750d55 <unknown>
bfmls z23.h, z13.h, z0.h[5] // 01100100-01101000-00001101-10110111
// CHECK-INST: bfmls z23.h, z13.h, z0.h[5]
// CHECK-ENCODING: [0xb7,0x0d,0x68,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64680db7 <unknown>
bfmls z31.h, z31.h, z7.h[7] // 01100100-01111111-00001111-11111111
// CHECK-INST: bfmls z31.h, z31.h, z7.h[7]
// CHECK-ENCODING: [0xff,0x0f,0x7f,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 647f0fff <unknown>
@@ -61,7 +61,7 @@ bfmls z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00101101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmls z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x2d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65282db7 <unknown>
movprfx z23, z31
@@ -69,30 +69,30 @@ bfmls z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00101101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmls z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x2d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65282db7 <unknown>
bfmls z0.h, p0/m, z0.h, z0.h // 01100101-00100000-00100000-00000000
// CHECK-INST: bfmls z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x20,0x20,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65202000 <unknown>
bfmls z21.h, p5/m, z10.h, z21.h // 01100101-00110101-00110101-01010101
// CHECK-INST: bfmls z21.h, p5/m, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x35,0x35,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65353555 <unknown>
bfmls z23.h, p3/m, z13.h, z8.h // 01100101-00101000-00101101-10110111
// CHECK-INST: bfmls z23.h, p3/m, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x2d,0x28,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65282db7 <unknown>
bfmls z31.h, p7/m, z31.h, z31.h // 01100101-00111111-00111111-11111111
// CHECK-INST: bfmls z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x3f,0x3f,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 653f3fff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmul-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfmul-diagnostics.s
index 51adae0..ce4c7c8 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmul-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmul-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfmul.s b/llvm/test/MC/AArch64/SVE2p1/bfmul.s
index 593eb4a..61aaa9d 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfmul.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfmul.s
@@ -1,49 +1,49 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
bfmul z0.h, z0.h, z0.h[0] // 01100100-00100000-00101000-00000000
// CHECK-INST: bfmul z0.h, z0.h, z0.h[0]
// CHECK-ENCODING: [0x00,0x28,0x20,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64202800 <unknown>
bfmul z21.h, z10.h, z5.h[6] // 01100100-01110101-00101001-01010101
// CHECK-INST: bfmul z21.h, z10.h, z5.h[6]
// CHECK-ENCODING: [0x55,0x29,0x75,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 64752955 <unknown>
bfmul z23.h, z13.h, z0.h[5] // 01100100-01101000-00101001-10110111
// CHECK-INST: bfmul z23.h, z13.h, z0.h[5]
// CHECK-ENCODING: [0xb7,0x29,0x68,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 646829b7 <unknown>
bfmul z31.h, z31.h, z7.h[7] // 01100100-01111111-00101011-11111111
// CHECK-INST: bfmul z31.h, z31.h, z7.h[7]
// CHECK-ENCODING: [0xff,0x2b,0x7f,0x64]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 647f2bff <unknown>
movprfx z23.h, p3/m, z31.h
@@ -51,7 +51,7 @@ bfmul z23.h, p3/m, z23.h, z13.h // 01100101-00000010-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfmul z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65028db7 <unknown>
movprfx z23, z31
@@ -59,54 +59,54 @@ bfmul z23.h, p3/m, z23.h, z13.h // 01100101-00000010-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfmul z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65028db7 <unknown>
bfmul z0.h, p0/m, z0.h, z0.h // 01100101-00000010-10000000-00000000
// CHECK-INST: bfmul z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65028000 <unknown>
bfmul z21.h, p5/m, z21.h, z10.h // 01100101-00000010-10010101-01010101
// CHECK-INST: bfmul z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65029555 <unknown>
bfmul z23.h, p3/m, z23.h, z13.h // 01100101-00000010-10001101-10110111
// CHECK-INST: bfmul z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65028db7 <unknown>
bfmul z31.h, p7/m, z31.h, z31.h // 01100101-00000010-10011111-11111111
// CHECK-INST: bfmul z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x02,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65029fff <unknown>
bfmul z0.h, z0.h, z0.h // 01100101-00000000-00001000-00000000
// CHECK-INST: bfmul z0.h, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x08,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65000800 <unknown>
bfmul z21.h, z10.h, z21.h // 01100101-00010101-00001001-01010101
// CHECK-INST: bfmul z21.h, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x09,0x15,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65150955 <unknown>
bfmul z23.h, z13.h, z8.h // 01100101-00001000-00001001-10110111
// CHECK-INST: bfmul z23.h, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x09,0x08,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 650809b7 <unknown>
bfmul z31.h, z31.h, z31.h // 01100101-00011111-00001011-11111111
// CHECK-INST: bfmul z31.h, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x0b,0x1f,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 651f0bff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfsub-diagnostics.s b/llvm/test/MC/AArch64/SVE2p1/bfsub-diagnostics.s
index 86cb320..dff7cde 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfsub-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfsub-diagnostics.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+b16b16 2>&1 < %s | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2p1,+sve-b16b16 2>&1 < %s | FileCheck %s
// --------------------------------------------------------------------------//
// Invalid predicate register
diff --git a/llvm/test/MC/AArch64/SVE2p1/bfsub.s b/llvm/test/MC/AArch64/SVE2p1/bfsub.s
index 5eae056..2ff84f8 100644
--- a/llvm/test/MC/AArch64/SVE2p1/bfsub.s
+++ b/llvm/test/MC/AArch64/SVE2p1/bfsub.s
@@ -1,25 +1,25 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sve2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sve2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+b16b16 - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+b16b16 < %s \
-// RUN: | llvm-objdump -d --mattr=-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+b16b16 < %s \
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --no-print-imm-hex --mattr=+sme2,+sve-b16b16 - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sme2,+sve-b16b16 < %s \
+// RUN: | llvm-objdump -d --mattr=-sve-b16b16 - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sme2,+sve-b16b16 < %s \
// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
-// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+b16b16 -disassemble -show-encoding \
+// RUN: | llvm-mc -triple=aarch64 -mattr=+sme2,+sve-b16b16 -disassemble -show-encoding \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
movprfx z23.h, p3/m, z31.h
@@ -27,7 +27,7 @@ bfsub z23.h, p3/m, z23.h, z13.h // 01100101-00000001-10001101-10110111
// CHECK-INST: movprfx z23.h, p3/m, z31.h
// CHECK-INST: bfsub z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65018db7 <unknown>
movprfx z23, z31
@@ -35,53 +35,53 @@ bfsub z23.h, p3/m, z23.h, z13.h // 01100101-00000001-10001101-10110111
// CHECK-INST: movprfx z23, z31
// CHECK-INST: bfsub z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65018db7 <unknown>
bfsub z0.h, p0/m, z0.h, z0.h // 01100101-00000001-10000000-00000000
// CHECK-INST: bfsub z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65018000 <unknown>
bfsub z21.h, p5/m, z21.h, z10.h // 01100101-00000001-10010101-01010101
// CHECK-INST: bfsub z21.h, p5/m, z21.h, z10.h
// CHECK-ENCODING: [0x55,0x95,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65019555 <unknown>
bfsub z23.h, p3/m, z23.h, z13.h // 01100101-00000001-10001101-10110111
// CHECK-INST: bfsub z23.h, p3/m, z23.h, z13.h
// CHECK-ENCODING: [0xb7,0x8d,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65018db7 <unknown>
bfsub z31.h, p7/m, z31.h, z31.h // 01100101-00000001-10011111-11111111
// CHECK-INST: bfsub z31.h, p7/m, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x9f,0x01,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65019fff <unknown>
bfsub z0.h, z0.h, z0.h // 01100101-00000000-00000100-00000000
// CHECK-INST: bfsub z0.h, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x04,0x00,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65000400 <unknown>
bfsub z21.h, z10.h, z21.h // 01100101-00010101-00000101-01010101
// CHECK-INST: bfsub z21.h, z10.h, z21.h
// CHECK-ENCODING: [0x55,0x05,0x15,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 65150555 <unknown>
bfsub z23.h, z13.h, z8.h // 01100101-00001000-00000101-10110111
// CHECK-INST: bfsub z23.h, z13.h, z8.h
// CHECK-ENCODING: [0xb7,0x05,0x08,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 650805b7 <unknown>
bfsub z31.h, z31.h, z31.h // 01100101-00011111-00000111-11111111
// CHECK-INST: bfsub z31.h, z31.h, z31.h
// CHECK-ENCODING: [0xff,0x07,0x1f,0x65]
-// CHECK-ERROR: instruction requires: b16b16 sve2 or sme2
+// CHECK-ERROR: instruction requires: sve2 or sme2 sve-b16b16
// CHECK-UNKNOWN: 651f07ff <unknown>
diff --git a/llvm/test/MC/AArch64/SVE2p1/directive-arch-negative.s b/llvm/test/MC/AArch64/SVE2p1/directive-arch-negative.s
index 7f1fa8f..8e8ec92 100644
--- a/llvm/test/MC/AArch64/SVE2p1/directive-arch-negative.s
+++ b/llvm/test/MC/AArch64/SVE2p1/directive-arch-negative.s
@@ -5,3 +5,9 @@
sclamp z0.s, z1.s, z2.s
// CHECK: error: instruction requires: sme or sve2p1
// CHECK: sclamp z0.s, z1.s, z2.s
+
+.arch armv9-a+sve2p1+sve-b16b16
+.arch armv9-a+sve2p1+nosve-b16b16
+bfadd z23.h, p3/m, z23.h, z13.h
+// CHECK: error: instruction requires: sve-b16b16
+// CHECK: bfadd z23.h, p3/m, z23.h, z13.h
diff --git a/llvm/test/MC/AArch64/SVE2p1/directive-arch.s b/llvm/test/MC/AArch64/SVE2p1/directive-arch.s
index c52f271..b8f25e5 100644
--- a/llvm/test/MC/AArch64/SVE2p1/directive-arch.s
+++ b/llvm/test/MC/AArch64/SVE2p1/directive-arch.s
@@ -3,3 +3,7 @@
.arch armv9-a+sve2p1
sclamp z0.s, z1.s, z2.s
// CHECK: sclamp z0.s, z1.s, z2.s
+
+.arch armv9-a+sve2p1+sve-b16b16
+bfadd z23.h, p3/m, z23.h, z13.h
+// CHECK: bfadd z23.h, p3/m, z23.h, z13.h \ No newline at end of file
diff --git a/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension-negative.s b/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension-negative.s
index 29de56c..97c2c1f 100644
--- a/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension-negative.s
+++ b/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension-negative.s
@@ -5,3 +5,10 @@
sclamp z0.s, z1.s, z2.s
// CHECK: error: instruction requires: sme or sve2p1
// CHECK: sclamp z0.s, z1.s, z2.s
+
+.arch_extension sve2p1
+.arch_extension sve-b16b16
+.arch_extension nosve-b16b16
+bfadd z23.h, p3/m, z23.h, z13.h
+// CHECK: error: instruction requires: sve-b16b16
+// CHECK: bfadd z23.h, p3/m, z23.h, z13.h \ No newline at end of file
diff --git a/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension.s b/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension.s
index 3eb39c0..867cf88 100644
--- a/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension.s
+++ b/llvm/test/MC/AArch64/SVE2p1/directive-arch_extension.s
@@ -3,3 +3,8 @@
.arch_extension sve2p1
sclamp z0.s, z1.s, z2.s
// CHECK: sclamp z0.s, z1.s, z2.s
+
+.arch_extension sve2p1
+.arch_extension sve-b16b16
+bfadd z23.h, p3/m, z23.h, z13.h
+// CHECK: bfadd z23.h, p3/m, z23.h, z13.h \ No newline at end of file
diff --git a/llvm/test/MC/AArch64/adrp-auth-relocation.s b/llvm/test/MC/AArch64/adrp-auth-relocation.s
new file mode 100644
index 0000000..57021c7
--- /dev/null
+++ b/llvm/test/MC/AArch64/adrp-auth-relocation.s
@@ -0,0 +1,12 @@
+// RUN: llvm-mc -triple=aarch64-linux-gnu -filetype=obj -o - %s | llvm-readobj -r - | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64-linux-gnu_ilp32 -filetype=obj \
+// RUN: -o /dev/null %s 2>&1 | FileCheck -check-prefix=CHECK-ILP32 %s
+
+.text
+adrp x0, :got_auth:sym
+
+.global sym
+sym:
+
+// CHECK: R_AARCH64_AUTH_ADR_GOT_PAGE sym
+// CHECK-ILP32: error: ILP32 ADRP AUTH relocation not supported (LP64 eqv: AUTH_ADR_GOT_PAGE)
diff --git a/llvm/test/MC/AArch64/arm64-elf-relocs.s b/llvm/test/MC/AArch64/arm64-elf-relocs.s
index 8813c4b..f679bb4 100644
--- a/llvm/test/MC/AArch64/arm64-elf-relocs.s
+++ b/llvm/test/MC/AArch64/arm64-elf-relocs.s
@@ -81,13 +81,17 @@
// CHECK: adrp x15, :got:sym
// CHECK-OBJ-LP64: 58 R_AARCH64_ADR_GOT_PAGE sym
+ adrp x15, :got_auth:sym
+// CHECK: adrp x15, :got_auth:sym
+// CHECK-OBJ-LP64: 5c R_AARCH64_AUTH_ADR_GOT_PAGE sym
+
adrp x29, :gottprel:sym
// CHECK: adrp x29, :gottprel:sym
-// CHECK-OBJ-LP64: 5c R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 sym
+// CHECK-OBJ-LP64: 60 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 sym
adrp x2, :tlsdesc:sym
// CHECK: adrp x2, :tlsdesc:sym
-// CHECK-OBJ-LP64: 60 R_AARCH64_TLSDESC_ADR_PAGE21 sym
+// CHECK-OBJ-LP64: 64 R_AARCH64_TLSDESC_ADR_PAGE21 sym
// LLVM is not competent enough to do this relocation because the
// page boundary could occur anywhere after linking. A relocation
@@ -96,7 +100,7 @@
.global trickQuestion
trickQuestion:
// CHECK: adrp x3, trickQuestion
-// CHECK-OBJ-LP64: 64 R_AARCH64_ADR_PREL_PG_HI21 trickQuestion
+// CHECK-OBJ-LP64: 68 R_AARCH64_ADR_PREL_PG_HI21 trickQuestion
ldrb w2, [x3, :lo12:sym]
ldrsb w5, [x7, #:lo12:sym]
@@ -245,6 +249,16 @@ trickQuestion:
// CHECK-OBJ-LP64: R_AARCH64_LD64_GOT_LO12_NC sym
// CHECK-OBJ-LP64: R_AARCH64_LD64_GOT_LO12_NC sym+0x7
+ ldr x24, [x23, #:got_auth_lo12:sym]
+ ldr d22, [x21, :got_auth_lo12:sym]
+ ldr x24, [x23, :got_auth_lo12:sym+7]
+// CHECK: ldr x24, [x23, :got_auth_lo12:sym]
+// CHECK: ldr d22, [x21, :got_auth_lo12:sym]
+// CHECK: ldr x24, [x23, :got_auth_lo12:sym+7]
+// CHECK-OBJ-LP64: R_AARCH64_AUTH_LD64_GOT_LO12_NC sym
+// CHECK-OBJ-LP64: R_AARCH64_AUTH_LD64_GOT_LO12_NC sym
+// CHECK-OBJ-LP64: R_AARCH64_AUTH_LD64_GOT_LO12_NC sym+0x7
+
ldr x24, [x23, #:gotpage_lo15:sym]
ldr d22, [x21, :gotpage_lo15:sym]
ldr d22, [x23, :gotpage_lo15:sym+7]
diff --git a/llvm/test/MC/AArch64/ilp32-diagnostics.s b/llvm/test/MC/AArch64/ilp32-diagnostics.s
index 8a3bc13..5d9c6e5 100644
--- a/llvm/test/MC/AArch64/ilp32-diagnostics.s
+++ b/llvm/test/MC/AArch64/ilp32-diagnostics.s
@@ -69,6 +69,12 @@ ldr x10, [x0, #:gottprel_lo12:var]
ldr x24, [x23, #:got_lo12:sym]
// ERROR: [[#@LINE-1]]:1: error: ILP32 64-bit load/store relocation not supported (LP64 eqv: LD64_GOT_LO12_NC)
+ldr x24, [x23, #:got_auth_lo12:sym]
+// ERROR: [[#@LINE-1]]:1: error: ILP32 64-bit load/store relocation not supported (LP64 eqv: AUTH_GOT_LO12_NC)
+
+add x24, x23, #:got_auth_lo12:sym
+// ERROR: [[#@LINE-1]]:1: error: ILP32 ADD AUTH relocation not supported (LP64 eqv: AUTH_GOT_ADD_LO12_NC)
+
ldr x24, [x23, :gottprel_lo12:sym]
// ERROR: [[#@LINE-1]]:1: error: ILP32 64-bit load/store relocation not supported (LP64 eqv: TLSIE_LD64_GOTTPREL_LO12_NC)
diff --git a/llvm/test/MC/AMDGPU/gfx10_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx10_asm_vop1.s
index 3cc2550..80ec593 100644
--- a/llvm/test/MC/AMDGPU/gfx10_asm_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx10_asm_vop1.s
@@ -163,6 +163,15 @@ v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
// GFX10: encoding: [0xf9,0x02,0x0a,0x7e,0x01,0x16,0x06,0x00]
+v_mov_b32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:0 src0_sel:06
+// GFX10: encoding: [0xf9,0x02,0x0a,0x7e,0x01,0x05,0x06,0x00]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:1 src0_sel:0x6
+// GFX10: encoding: [0xf9,0x02,0x0a,0x7e,0x01,0x0e,0x06,0x00]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:2 src0_sel:6
+// GFX10: encoding: [0xf9,0x02,0x0a,0x7e,0x01,0x16,0x06,0x00]
+
v_mov_b32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
// GFX10: encoding: [0xf9,0x02,0x0a,0x7e,0x01,0x16,0x06,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx10_err_pos.s b/llvm/test/MC/AMDGPU/gfx10_err_pos.s
index 4db454f..d99da6e 100644
--- a/llvm/test/MC/AMDGPU/gfx10_err_pos.s
+++ b/llvm/test/MC/AMDGPU/gfx10_err_pos.s
@@ -484,23 +484,43 @@ v_mov_b32_sdwa v1, sext(u)
// CHECK-NEXT:{{^}} ^
//==============================================================================
-// expected an identifier
+// expected a valid identifier or number in a valid range
v_mov_b32_sdwa v5, v1 dst_sel:
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected an identifier
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: unknown token in expression
// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:
// CHECK-NEXT:{{^}} ^
-v_mov_b32_sdwa v5, v1 dst_sel:0
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected an identifier
-// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:0
+v_mov_b32_sdwa v5, v1 dst_sel:0a
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:0a
+// CHECK-NEXT:{{^}} ^
+
+v_mov_b32_sdwa v5, v1 dst_sel:BYTE_1x
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid dst_sel value
+// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:BYTE_1
// CHECK-NEXT:{{^}} ^
v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:[UNUSED_PAD]
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected an identifier
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected absolute expression
// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:[UNUSED_PAD]
// CHECK-NEXT:{{^}} ^
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:XXX
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid dst_unused value
+// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:XXX
+// CHECK-NEXT:{{^}} ^
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:3
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid dst_unused value
+// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:3
+// CHECK-NEXT:{{^}} ^
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:-1
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid dst_unused value
+// CHECK-NEXT:{{^}}v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:-1
+// CHECK-NEXT:{{^}} ^
+
//==============================================================================
// expected an opening square bracket
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_smem.s b/llvm/test/MC/AMDGPU/gfx12_asm_smem.s
index a64f353..668f767 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_smem.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_smem.s
@@ -772,6 +772,18 @@ s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_DEV
s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_SYS
// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_SYS ; encoding: [0x42,0x01,0x60,0xf4,0x00,0x00,0x00,0x00]
+s_load_b32 s5, s[4:5], s0 offset:0x0 scope:0
+// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 ; encoding: [0x42,0x01,0x00,0xf4,0x00,0x00,0x00,0x00]
+
+s_load_b32 s5, s[4:5], s0 offset:0x0 scope:1
+// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_SE ; encoding: [0x42,0x01,0x20,0xf4,0x00,0x00,0x00,0x00]
+
+s_load_b32 s5, s[4:5], s0 offset:0x0 scope:0x2
+// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_DEV ; encoding: [0x42,0x01,0x40,0xf4,0x00,0x00,0x00,0x00]
+
+s_load_b32 s5, s[4:5], s0 offset:0x0 scope:03
+// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 scope:SCOPE_SYS ; encoding: [0x42,0x01,0x60,0xf4,0x00,0x00,0x00,0x00]
+
s_load_b32 s5, s[4:5], s0 offset:0x0 th:TH_LOAD_HT scope:SCOPE_SE
// GFX12: s_load_b32 s5, s[4:5], s0 offset:0x0 th:TH_LOAD_HT scope:SCOPE_SE ; encoding: [0x42,0x01,0x20,0xf5,0x00,0x00,0x00,0x00]
diff --git a/llvm/test/MC/ARM/Windows/branch-reloc-offset.s b/llvm/test/MC/ARM/Windows/branch-reloc-offset.s
index 2e70a72..e7d59cd 100644
--- a/llvm/test/MC/ARM/Windows/branch-reloc-offset.s
+++ b/llvm/test/MC/ARM/Windows/branch-reloc-offset.s
@@ -1,4 +1,5 @@
// RUN: llvm-mc -triple thumbv7-windows-gnu -filetype obj %s -o - | llvm-objdump -D -r - | FileCheck %s
+// RUN: not llvm-mc -triple thumbv7-windows-gnu -filetype obj --defsym ERR=1 %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=ERR
.text
main:
@@ -55,3 +56,40 @@ main:
// CHECK: e: bf00 nop
// CHECK: 00000010 <.Lother_target>:
// CHECK: 10: 4770 bx lr
+
+.ifdef ERR
+ .section "other2", "xr"
+err:
+ nop
+
+// Test errors, if referencing a symbol with an offset
+
+ b .Lerr_target+4
+// ERR: [[#@LINE-1]]:5: error: cannot perform a PC-relative fixup with a non-zero symbol offset
+ bl .Lerr_target+4
+// ERR: [[#@LINE-1]]:5: error: cannot perform a PC-relative fixup with a non-zero symbol offset
+ blx .Lerr_target+4
+// ERR: [[#@LINE-1]]:5: error: cannot perform a PC-relative fixup with a non-zero symbol offset
+
+// Test errors, if referencing a private label which lacks .def/.scl/.type/.endef, in another
+// section, without an offset. Such symbols are omitted from the output symbol table, so the
+// relocation can't reference them. Such relocations usually are made towards the base of the
+// section plus an offset, but such an offset is not supported with this relocation.
+
+ b .Lerr_target2
+// ERR: [[#@LINE-1]]:5: error: cannot perform a PC-relative fixup with a non-zero symbol offset
+
+ .def .Lerr_target
+ .scl 3
+ .type 32
+ .endef
+.Lerr_target:
+ nop
+ nop
+ bx lr
+
+ .section "other3", "xr"
+ nop
+.Lerr_target2:
+ bx lr
+.endif
diff --git a/llvm/test/MC/BPF/insn-unit.s b/llvm/test/MC/BPF/insn-unit.s
index 84735d1..e0a4864 100644
--- a/llvm/test/MC/BPF/insn-unit.s
+++ b/llvm/test/MC/BPF/insn-unit.s
@@ -34,9 +34,9 @@
r6 = *(u16 *)(r1 + 8) // BPF_LDX | BPF_H
r7 = *(u32 *)(r2 + 16) // BPF_LDX | BPF_W
r8 = *(u64 *)(r3 - 30) // BPF_LDX | BPF_DW
-// CHECK-64: 71 05 00 00 00 00 00 00 r5 = *(u8 *)(r0 + 0)
-// CHECK-64: 69 16 08 00 00 00 00 00 r6 = *(u16 *)(r1 + 8)
-// CHECK-64: 61 27 10 00 00 00 00 00 r7 = *(u32 *)(r2 + 16)
+// CHECK-64: 71 05 00 00 00 00 00 00 w5 = *(u8 *)(r0 + 0)
+// CHECK-64: 69 16 08 00 00 00 00 00 w6 = *(u16 *)(r1 + 8)
+// CHECK-64: 61 27 10 00 00 00 00 00 w7 = *(u32 *)(r2 + 16)
// CHECK-32: 71 05 00 00 00 00 00 00 w5 = *(u8 *)(r0 + 0)
// CHECK-32: 69 16 08 00 00 00 00 00 w6 = *(u16 *)(r1 + 8)
// CHECK-32: 61 27 10 00 00 00 00 00 w7 = *(u32 *)(r2 + 16)
@@ -47,9 +47,9 @@
*(u16 *)(r1 + 8) = r8 // BPF_STX | BPF_H
*(u32 *)(r2 + 16) = r9 // BPF_STX | BPF_W
*(u64 *)(r3 - 30) = r10 // BPF_STX | BPF_DW
-// CHECK-64: 73 70 00 00 00 00 00 00 *(u8 *)(r0 + 0) = r7
-// CHECK-64: 6b 81 08 00 00 00 00 00 *(u16 *)(r1 + 8) = r8
-// CHECK-64: 63 92 10 00 00 00 00 00 *(u32 *)(r2 + 16) = r9
+// CHECK-64: 73 70 00 00 00 00 00 00 *(u8 *)(r0 + 0) = w7
+// CHECK-64: 6b 81 08 00 00 00 00 00 *(u16 *)(r1 + 8) = w8
+// CHECK-64: 63 92 10 00 00 00 00 00 *(u32 *)(r2 + 16) = w9
// CHECK-32: 73 70 00 00 00 00 00 00 *(u8 *)(r0 + 0) = w7
// CHECK-32: 6b 81 08 00 00 00 00 00 *(u16 *)(r1 + 8) = w8
// CHECK-32: 63 92 10 00 00 00 00 00 *(u32 *)(r2 + 16) = w9
@@ -57,7 +57,7 @@
lock *(u32 *)(r2 + 16) += r9 // BPF_STX | BPF_W | BPF_XADD
lock *(u64 *)(r3 - 30) += r10 // BPF_STX | BPF_DW | BPF_XADD
-// CHECK-64: c3 92 10 00 00 00 00 00 lock *(u32 *)(r2 + 16) += r9
+// CHECK-64: c3 92 10 00 00 00 00 00 lock *(u32 *)(r2 + 16) += w9
// CHECK-32: c3 92 10 00 00 00 00 00 lock *(u32 *)(r2 + 16) += w9
// CHECK: db a3 e2 ff 00 00 00 00 lock *(u64 *)(r3 - 30) += r10
diff --git a/llvm/test/MC/BPF/load-store-32.s b/llvm/test/MC/BPF/load-store-32.s
index 826b13b..996d696 100644
--- a/llvm/test/MC/BPF/load-store-32.s
+++ b/llvm/test/MC/BPF/load-store-32.s
@@ -1,6 +1,6 @@
# RUN: llvm-mc -triple bpfel -filetype=obj -o %t %s
# RUN: llvm-objdump --no-print-imm-hex --mattr=+alu32 -d -r %t | FileCheck --check-prefix=CHECK-32 %s
-# RUN: llvm-objdump --no-print-imm-hex -d -r %t | FileCheck %s
+# RUN: llvm-objdump --no-print-imm-hex --mcpu=v1 -d -r %t | FileCheck %s
// ======== BPF_LDX Class ========
w5 = *(u8 *)(r0 + 0) // BPF_LDX | BPF_B
diff --git a/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-32.txt b/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-32.txt
new file mode 100644
index 0000000..67e8f36
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-32.txt
@@ -0,0 +1,1363 @@
+# RUN: llvm-mc --disassemble %s -triple=i386 | FileCheck %s --check-prefixes=ATT
+# RUN: llvm-mc --disassemble %s -triple=i386 --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL
+
+# ATT: vcvtnebf162ibs %xmm3, %xmm2
+# INTEL: vcvtnebf162ibs xmm2, xmm3
+0x62,0xf5,0x7f,0x08,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtnebf162ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7f,0x0f,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7f,0x8f,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %zmm3, %zmm2
+# INTEL: vcvtnebf162ibs zmm2, zmm3
+0x62,0xf5,0x7f,0x48,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtnebf162ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7f,0x4f,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm2 {k7} {z}, zmm3
+0x62,0xf5,0x7f,0xcf,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %ymm3, %ymm2
+# INTEL: vcvtnebf162ibs ymm2, ymm3
+0x62,0xf5,0x7f,0x28,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtnebf162ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7f,0x2f,0x69,0xd3
+
+# ATT: vcvtnebf162ibs %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm2 {k7} {z}, ymm3
+0x62,0xf5,0x7f,0xaf,0x69,0xd3
+
+# ATT: vcvtnebf162ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%eax){1to8}, %xmm2
+# INTEL: vcvtnebf162ibs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7f,0x18,0x69,0x10
+
+# ATT: vcvtnebf162ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvtnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7f,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7f,0x8f,0x69,0x51,0x7f
+
+# ATT: vcvtnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7f,0x9f,0x69,0x52,0x80
+
+# ATT: vcvtnebf162ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%eax){1to16}, %ymm2
+# INTEL: vcvtnebf162ibs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7f,0x38,0x69,0x10
+
+# ATT: vcvtnebf162ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7f,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7f,0xaf,0x69,0x51,0x7f
+
+# ATT: vcvtnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7f,0xbf,0x69,0x52,0x80
+
+# ATT: vcvtnebf162ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%eax){1to32}, %zmm2
+# INTEL: vcvtnebf162ibs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7f,0x58,0x69,0x10
+
+# ATT: vcvtnebf162ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7f,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7f,0xcf,0x69,0x51,0x7f
+
+# ATT: vcvtnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7f,0xdf,0x69,0x52,0x80
+
+# ATT: vcvtnebf162iubs %xmm3, %xmm2
+# INTEL: vcvtnebf162iubs xmm2, xmm3
+0x62,0xf5,0x7f,0x08,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtnebf162iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7f,0x0f,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7f,0x8f,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %zmm3, %zmm2
+# INTEL: vcvtnebf162iubs zmm2, zmm3
+0x62,0xf5,0x7f,0x48,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtnebf162iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7f,0x4f,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm2 {k7} {z}, zmm3
+0x62,0xf5,0x7f,0xcf,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %ymm3, %ymm2
+# INTEL: vcvtnebf162iubs ymm2, ymm3
+0x62,0xf5,0x7f,0x28,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtnebf162iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7f,0x2f,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm2 {k7} {z}, ymm3
+0x62,0xf5,0x7f,0xaf,0x6b,0xd3
+
+# ATT: vcvtnebf162iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%eax){1to8}, %xmm2
+# INTEL: vcvtnebf162iubs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7f,0x18,0x6b,0x10
+
+# ATT: vcvtnebf162iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvtnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7f,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7f,0x8f,0x6b,0x51,0x7f
+
+# ATT: vcvtnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7f,0x9f,0x6b,0x52,0x80
+
+# ATT: vcvtnebf162iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%eax){1to16}, %ymm2
+# INTEL: vcvtnebf162iubs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7f,0x38,0x6b,0x10
+
+# ATT: vcvtnebf162iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7f,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7f,0xaf,0x6b,0x51,0x7f
+
+# ATT: vcvtnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7f,0xbf,0x6b,0x52,0x80
+
+# ATT: vcvtnebf162iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%eax){1to32}, %zmm2
+# INTEL: vcvtnebf162iubs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7f,0x58,0x6b,0x10
+
+# ATT: vcvtnebf162iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7f,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7f,0xcf,0x6b,0x51,0x7f
+
+# ATT: vcvtnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7f,0xdf,0x6b,0x52,0x80
+
+# ATT: vcvtph2ibs %xmm3, %xmm2
+# INTEL: vcvtph2ibs xmm2, xmm3
+0x62,0xf5,0x7c,0x08,0x69,0xd3
+
+# ATT: vcvtph2ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtph2ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7c,0x0f,0x69,0xd3
+
+# ATT: vcvtph2ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtph2ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7c,0x8f,0x69,0xd3
+
+# ATT: vcvtph2ibs %zmm3, %zmm2
+# INTEL: vcvtph2ibs zmm2, zmm3
+0x62,0xf5,0x7c,0x48,0x69,0xd3
+
+# ATT: vcvtph2ibs {rn-sae}, %zmm3, %zmm2
+# INTEL: vcvtph2ibs zmm2, zmm3, {rn-sae}
+0x62,0xf5,0x7c,0x18,0x69,0xd3
+
+# ATT: vcvtph2ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtph2ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7c,0x4f,0x69,0xd3
+
+# ATT: vcvtph2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtph2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+0x62,0xf5,0x7c,0xff,0x69,0xd3
+
+# ATT: vcvtph2ibs %ymm3, %ymm2
+# INTEL: vcvtph2ibs ymm2, ymm3
+0x62,0xf5,0x7c,0x28,0x69,0xd3
+
+# ATT: vcvtph2ibs {rn-sae}, %ymm3, %ymm2
+# INTEL: vcvtph2ibs ymm2, ymm3, {rn-sae}
+0x62,0xf5,0x78,0x18,0x69,0xd3
+
+# ATT: vcvtph2ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtph2ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7c,0x2f,0x69,0xd3
+
+# ATT: vcvtph2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtph2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+0x62,0xf5,0x78,0xff,0x69,0xd3
+
+# ATT: vcvtph2ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%eax){1to8}, %xmm2
+# INTEL: vcvtph2ibs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7c,0x18,0x69,0x10
+
+# ATT: vcvtph2ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvtph2ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7c,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7c,0x8f,0x69,0x51,0x7f
+
+# ATT: vcvtph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvtph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7c,0x9f,0x69,0x52,0x80
+
+# ATT: vcvtph2ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%eax){1to16}, %ymm2
+# INTEL: vcvtph2ibs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7c,0x38,0x69,0x10
+
+# ATT: vcvtph2ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7c,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7c,0xaf,0x69,0x51,0x7f
+
+# ATT: vcvtph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvtph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7c,0xbf,0x69,0x52,0x80
+
+# ATT: vcvtph2ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%eax){1to32}, %zmm2
+# INTEL: vcvtph2ibs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7c,0x58,0x69,0x10
+
+# ATT: vcvtph2ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7c,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7c,0xcf,0x69,0x51,0x7f
+
+# ATT: vcvtph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvtph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7c,0xdf,0x69,0x52,0x80
+
+# ATT: vcvtph2iubs %xmm3, %xmm2
+# INTEL: vcvtph2iubs xmm2, xmm3
+0x62,0xf5,0x7c,0x08,0x6b,0xd3
+
+# ATT: vcvtph2iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtph2iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7c,0x0f,0x6b,0xd3
+
+# ATT: vcvtph2iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtph2iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7c,0x8f,0x6b,0xd3
+
+# ATT: vcvtph2iubs %zmm3, %zmm2
+# INTEL: vcvtph2iubs zmm2, zmm3
+0x62,0xf5,0x7c,0x48,0x6b,0xd3
+
+# ATT: vcvtph2iubs {rn-sae}, %zmm3, %zmm2
+# INTEL: vcvtph2iubs zmm2, zmm3, {rn-sae}
+0x62,0xf5,0x7c,0x18,0x6b,0xd3
+
+# ATT: vcvtph2iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtph2iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7c,0x4f,0x6b,0xd3
+
+# ATT: vcvtph2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtph2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+0x62,0xf5,0x7c,0xff,0x6b,0xd3
+
+# ATT: vcvtph2iubs %ymm3, %ymm2
+# INTEL: vcvtph2iubs ymm2, ymm3
+0x62,0xf5,0x7c,0x28,0x6b,0xd3
+
+# ATT: vcvtph2iubs {rn-sae}, %ymm3, %ymm2
+# INTEL: vcvtph2iubs ymm2, ymm3, {rn-sae}
+0x62,0xf5,0x78,0x18,0x6b,0xd3
+
+# ATT: vcvtph2iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtph2iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7c,0x2f,0x6b,0xd3
+
+# ATT: vcvtph2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtph2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+0x62,0xf5,0x78,0xff,0x6b,0xd3
+
+# ATT: vcvtph2iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%eax){1to8}, %xmm2
+# INTEL: vcvtph2iubs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7c,0x18,0x6b,0x10
+
+# ATT: vcvtph2iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvtph2iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7c,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7c,0x8f,0x6b,0x51,0x7f
+
+# ATT: vcvtph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvtph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7c,0x9f,0x6b,0x52,0x80
+
+# ATT: vcvtph2iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%eax){1to16}, %ymm2
+# INTEL: vcvtph2iubs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7c,0x38,0x6b,0x10
+
+# ATT: vcvtph2iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7c,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7c,0xaf,0x6b,0x51,0x7f
+
+# ATT: vcvtph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvtph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7c,0xbf,0x6b,0x52,0x80
+
+# ATT: vcvtph2iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%eax){1to32}, %zmm2
+# INTEL: vcvtph2iubs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7c,0x58,0x6b,0x10
+
+# ATT: vcvtph2iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7c,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7c,0xcf,0x6b,0x51,0x7f
+
+# ATT: vcvtph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvtph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7c,0xdf,0x6b,0x52,0x80
+
+# ATT: vcvtps2ibs %xmm3, %xmm2
+# INTEL: vcvtps2ibs xmm2, xmm3
+0x62,0xf5,0x7d,0x08,0x69,0xd3
+
+# ATT: vcvtps2ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtps2ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7d,0x0f,0x69,0xd3
+
+# ATT: vcvtps2ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtps2ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7d,0x8f,0x69,0xd3
+
+# ATT: vcvtps2ibs %zmm3, %zmm2
+# INTEL: vcvtps2ibs zmm2, zmm3
+0x62,0xf5,0x7d,0x48,0x69,0xd3
+
+# ATT: vcvtps2ibs {rn-sae}, %zmm3, %zmm2
+# INTEL: vcvtps2ibs zmm2, zmm3, {rn-sae}
+0x62,0xf5,0x7d,0x18,0x69,0xd3
+
+# ATT: vcvtps2ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtps2ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7d,0x4f,0x69,0xd3
+
+# ATT: vcvtps2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtps2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+0x62,0xf5,0x7d,0xff,0x69,0xd3
+
+# ATT: vcvtps2ibs %ymm3, %ymm2
+# INTEL: vcvtps2ibs ymm2, ymm3
+0x62,0xf5,0x7d,0x28,0x69,0xd3
+
+# ATT: vcvtps2ibs {rn-sae}, %ymm3, %ymm2
+# INTEL: vcvtps2ibs ymm2, ymm3, {rn-sae}
+0x62,0xf5,0x79,0x18,0x69,0xd3
+
+# ATT: vcvtps2ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtps2ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7d,0x2f,0x69,0xd3
+
+# ATT: vcvtps2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtps2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+0x62,0xf5,0x79,0xff,0x69,0xd3
+
+# ATT: vcvtps2ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%eax){1to4}, %xmm2
+# INTEL: vcvtps2ibs xmm2, dword ptr [eax]{1to4}
+0x62,0xf5,0x7d,0x18,0x69,0x10
+
+# ATT: vcvtps2ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvtps2ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7d,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7d,0x8f,0x69,0x51,0x7f
+
+# ATT: vcvtps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+# INTEL: vcvtps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+0x62,0xf5,0x7d,0x9f,0x69,0x52,0x80
+
+# ATT: vcvtps2ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%eax){1to8}, %ymm2
+# INTEL: vcvtps2ibs ymm2, dword ptr [eax]{1to8}
+0x62,0xf5,0x7d,0x38,0x69,0x10
+
+# ATT: vcvtps2ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7d,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7d,0xaf,0x69,0x51,0x7f
+
+# ATT: vcvtps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+# INTEL: vcvtps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+0x62,0xf5,0x7d,0xbf,0x69,0x52,0x80
+
+# ATT: vcvtps2ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%eax){1to16}, %zmm2
+# INTEL: vcvtps2ibs zmm2, dword ptr [eax]{1to16}
+0x62,0xf5,0x7d,0x58,0x69,0x10
+
+# ATT: vcvtps2ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7d,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7d,0xcf,0x69,0x51,0x7f
+
+# ATT: vcvtps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+# INTEL: vcvtps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+0x62,0xf5,0x7d,0xdf,0x69,0x52,0x80
+
+# ATT: vcvtps2iubs %xmm3, %xmm2
+# INTEL: vcvtps2iubs xmm2, xmm3
+0x62,0xf5,0x7d,0x08,0x6b,0xd3
+
+# ATT: vcvtps2iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvtps2iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7d,0x0f,0x6b,0xd3
+
+# ATT: vcvtps2iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvtps2iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7d,0x8f,0x6b,0xd3
+
+# ATT: vcvtps2iubs %zmm3, %zmm2
+# INTEL: vcvtps2iubs zmm2, zmm3
+0x62,0xf5,0x7d,0x48,0x6b,0xd3
+
+# ATT: vcvtps2iubs {rn-sae}, %zmm3, %zmm2
+# INTEL: vcvtps2iubs zmm2, zmm3, {rn-sae}
+0x62,0xf5,0x7d,0x18,0x6b,0xd3
+
+# ATT: vcvtps2iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvtps2iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7d,0x4f,0x6b,0xd3
+
+# ATT: vcvtps2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvtps2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+0x62,0xf5,0x7d,0xff,0x6b,0xd3
+
+# ATT: vcvtps2iubs %ymm3, %ymm2
+# INTEL: vcvtps2iubs ymm2, ymm3
+0x62,0xf5,0x7d,0x28,0x6b,0xd3
+
+# ATT: vcvtps2iubs {rn-sae}, %ymm3, %ymm2
+# INTEL: vcvtps2iubs ymm2, ymm3, {rn-sae}
+0x62,0xf5,0x79,0x18,0x6b,0xd3
+
+# ATT: vcvtps2iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvtps2iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7d,0x2f,0x6b,0xd3
+
+# ATT: vcvtps2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvtps2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+0x62,0xf5,0x79,0xff,0x6b,0xd3
+
+# ATT: vcvtps2iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvtps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvtps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%eax){1to4}, %xmm2
+# INTEL: vcvtps2iubs xmm2, dword ptr [eax]{1to4}
+0x62,0xf5,0x7d,0x18,0x6b,0x10
+
+# ATT: vcvtps2iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvtps2iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7d,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvtps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7d,0x8f,0x6b,0x51,0x7f
+
+# ATT: vcvtps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+# INTEL: vcvtps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+0x62,0xf5,0x7d,0x9f,0x6b,0x52,0x80
+
+# ATT: vcvtps2iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvtps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvtps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%eax){1to8}, %ymm2
+# INTEL: vcvtps2iubs ymm2, dword ptr [eax]{1to8}
+0x62,0xf5,0x7d,0x38,0x6b,0x10
+
+# ATT: vcvtps2iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvtps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7d,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvtps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7d,0xaf,0x6b,0x51,0x7f
+
+# ATT: vcvtps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+# INTEL: vcvtps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+0x62,0xf5,0x7d,0xbf,0x6b,0x52,0x80
+
+# ATT: vcvtps2iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvtps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvtps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%eax){1to16}, %zmm2
+# INTEL: vcvtps2iubs zmm2, dword ptr [eax]{1to16}
+0x62,0xf5,0x7d,0x58,0x6b,0x10
+
+# ATT: vcvtps2iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvtps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7d,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvtps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7d,0xcf,0x6b,0x51,0x7f
+
+# ATT: vcvtps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+# INTEL: vcvtps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+0x62,0xf5,0x7d,0xdf,0x6b,0x52,0x80
+
+# ATT: vcvttnebf162ibs %xmm3, %xmm2
+# INTEL: vcvttnebf162ibs xmm2, xmm3
+0x62,0xf5,0x7f,0x08,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttnebf162ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7f,0x0f,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7f,0x8f,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %zmm3, %zmm2
+# INTEL: vcvttnebf162ibs zmm2, zmm3
+0x62,0xf5,0x7f,0x48,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttnebf162ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7f,0x4f,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm2 {k7} {z}, zmm3
+0x62,0xf5,0x7f,0xcf,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %ymm3, %ymm2
+# INTEL: vcvttnebf162ibs ymm2, ymm3
+0x62,0xf5,0x7f,0x28,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttnebf162ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7f,0x2f,0x68,0xd3
+
+# ATT: vcvttnebf162ibs %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm2 {k7} {z}, ymm3
+0x62,0xf5,0x7f,0xaf,0x68,0xd3
+
+# ATT: vcvttnebf162ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%eax){1to8}, %xmm2
+# INTEL: vcvttnebf162ibs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7f,0x18,0x68,0x10
+
+# ATT: vcvttnebf162ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvttnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7f,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7f,0x8f,0x68,0x51,0x7f
+
+# ATT: vcvttnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7f,0x9f,0x68,0x52,0x80
+
+# ATT: vcvttnebf162ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%eax){1to16}, %ymm2
+# INTEL: vcvttnebf162ibs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7f,0x38,0x68,0x10
+
+# ATT: vcvttnebf162ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7f,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7f,0xaf,0x68,0x51,0x7f
+
+# ATT: vcvttnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7f,0xbf,0x68,0x52,0x80
+
+# ATT: vcvttnebf162ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%eax){1to32}, %zmm2
+# INTEL: vcvttnebf162ibs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7f,0x58,0x68,0x10
+
+# ATT: vcvttnebf162ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7f,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7f,0xcf,0x68,0x51,0x7f
+
+# ATT: vcvttnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7f,0xdf,0x68,0x52,0x80
+
+# ATT: vcvttnebf162iubs %xmm3, %xmm2
+# INTEL: vcvttnebf162iubs xmm2, xmm3
+0x62,0xf5,0x7f,0x08,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttnebf162iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7f,0x0f,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7f,0x8f,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %zmm3, %zmm2
+# INTEL: vcvttnebf162iubs zmm2, zmm3
+0x62,0xf5,0x7f,0x48,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttnebf162iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7f,0x4f,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm2 {k7} {z}, zmm3
+0x62,0xf5,0x7f,0xcf,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %ymm3, %ymm2
+# INTEL: vcvttnebf162iubs ymm2, ymm3
+0x62,0xf5,0x7f,0x28,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttnebf162iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7f,0x2f,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm2 {k7} {z}, ymm3
+0x62,0xf5,0x7f,0xaf,0x6a,0xd3
+
+# ATT: vcvttnebf162iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%eax){1to8}, %xmm2
+# INTEL: vcvttnebf162iubs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7f,0x18,0x6a,0x10
+
+# ATT: vcvttnebf162iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvttnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7f,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7f,0x8f,0x6a,0x51,0x7f
+
+# ATT: vcvttnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7f,0x9f,0x6a,0x52,0x80
+
+# ATT: vcvttnebf162iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%eax){1to16}, %ymm2
+# INTEL: vcvttnebf162iubs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7f,0x38,0x6a,0x10
+
+# ATT: vcvttnebf162iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7f,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7f,0xaf,0x6a,0x51,0x7f
+
+# ATT: vcvttnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7f,0xbf,0x6a,0x52,0x80
+
+# ATT: vcvttnebf162iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7f,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7f,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%eax){1to32}, %zmm2
+# INTEL: vcvttnebf162iubs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7f,0x58,0x6a,0x10
+
+# ATT: vcvttnebf162iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7f,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7f,0xcf,0x6a,0x51,0x7f
+
+# ATT: vcvttnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7f,0xdf,0x6a,0x52,0x80
+
+# ATT: vcvttph2ibs %xmm3, %xmm2
+# INTEL: vcvttph2ibs xmm2, xmm3
+0x62,0xf5,0x7c,0x08,0x68,0xd3
+
+# ATT: vcvttph2ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttph2ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7c,0x0f,0x68,0xd3
+
+# ATT: vcvttph2ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttph2ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7c,0x8f,0x68,0xd3
+
+# ATT: vcvttph2ibs %zmm3, %zmm2
+# INTEL: vcvttph2ibs zmm2, zmm3
+0x62,0xf5,0x7c,0x48,0x68,0xd3
+
+# ATT: vcvttph2ibs {sae}, %zmm3, %zmm2
+# INTEL: vcvttph2ibs zmm2, zmm3, {sae}
+0x62,0xf5,0x7c,0x18,0x68,0xd3
+
+# ATT: vcvttph2ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttph2ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7c,0x4f,0x68,0xd3
+
+# ATT: vcvttph2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttph2ibs zmm2 {k7} {z}, zmm3, {sae}
+0x62,0xf5,0x7c,0x9f,0x68,0xd3
+
+# ATT: vcvttph2ibs %ymm3, %ymm2
+# INTEL: vcvttph2ibs ymm2, ymm3
+0x62,0xf5,0x7c,0x28,0x68,0xd3
+
+# ATT: vcvttph2ibs {sae}, %ymm3, %ymm2
+# INTEL: vcvttph2ibs ymm2, ymm3, {sae}
+0x62,0xf5,0x78,0x18,0x68,0xd3
+
+# ATT: vcvttph2ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttph2ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7c,0x2f,0x68,0xd3
+
+# ATT: vcvttph2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttph2ibs ymm2 {k7} {z}, ymm3, {sae}
+0x62,0xf5,0x78,0x9f,0x68,0xd3
+
+# ATT: vcvttph2ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%eax){1to8}, %xmm2
+# INTEL: vcvttph2ibs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7c,0x18,0x68,0x10
+
+# ATT: vcvttph2ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvttph2ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7c,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7c,0x8f,0x68,0x51,0x7f
+
+# ATT: vcvttph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvttph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7c,0x9f,0x68,0x52,0x80
+
+# ATT: vcvttph2ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%eax){1to16}, %ymm2
+# INTEL: vcvttph2ibs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7c,0x38,0x68,0x10
+
+# ATT: vcvttph2ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7c,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7c,0xaf,0x68,0x51,0x7f
+
+# ATT: vcvttph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvttph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7c,0xbf,0x68,0x52,0x80
+
+# ATT: vcvttph2ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%eax){1to32}, %zmm2
+# INTEL: vcvttph2ibs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7c,0x58,0x68,0x10
+
+# ATT: vcvttph2ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7c,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7c,0xcf,0x68,0x51,0x7f
+
+# ATT: vcvttph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvttph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7c,0xdf,0x68,0x52,0x80
+
+# ATT: vcvttph2iubs %xmm3, %xmm2
+# INTEL: vcvttph2iubs xmm2, xmm3
+0x62,0xf5,0x7c,0x08,0x6a,0xd3
+
+# ATT: vcvttph2iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttph2iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7c,0x0f,0x6a,0xd3
+
+# ATT: vcvttph2iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttph2iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7c,0x8f,0x6a,0xd3
+
+# ATT: vcvttph2iubs %zmm3, %zmm2
+# INTEL: vcvttph2iubs zmm2, zmm3
+0x62,0xf5,0x7c,0x48,0x6a,0xd3
+
+# ATT: vcvttph2iubs {sae}, %zmm3, %zmm2
+# INTEL: vcvttph2iubs zmm2, zmm3, {sae}
+0x62,0xf5,0x7c,0x18,0x6a,0xd3
+
+# ATT: vcvttph2iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttph2iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7c,0x4f,0x6a,0xd3
+
+# ATT: vcvttph2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttph2iubs zmm2 {k7} {z}, zmm3, {sae}
+0x62,0xf5,0x7c,0x9f,0x6a,0xd3
+
+# ATT: vcvttph2iubs %ymm3, %ymm2
+# INTEL: vcvttph2iubs ymm2, ymm3
+0x62,0xf5,0x7c,0x28,0x6a,0xd3
+
+# ATT: vcvttph2iubs {sae}, %ymm3, %ymm2
+# INTEL: vcvttph2iubs ymm2, ymm3, {sae}
+0x62,0xf5,0x78,0x18,0x6a,0xd3
+
+# ATT: vcvttph2iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttph2iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7c,0x2f,0x6a,0xd3
+
+# ATT: vcvttph2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttph2iubs ymm2 {k7} {z}, ymm3, {sae}
+0x62,0xf5,0x78,0x9f,0x6a,0xd3
+
+# ATT: vcvttph2iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%eax){1to8}, %xmm2
+# INTEL: vcvttph2iubs xmm2, word ptr [eax]{1to8}
+0x62,0xf5,0x7c,0x18,0x6a,0x10
+
+# ATT: vcvttph2iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvttph2iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7c,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7c,0x8f,0x6a,0x51,0x7f
+
+# ATT: vcvttph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+# INTEL: vcvttph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+0x62,0xf5,0x7c,0x9f,0x6a,0x52,0x80
+
+# ATT: vcvttph2iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%eax){1to16}, %ymm2
+# INTEL: vcvttph2iubs ymm2, word ptr [eax]{1to16}
+0x62,0xf5,0x7c,0x38,0x6a,0x10
+
+# ATT: vcvttph2iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7c,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7c,0xaf,0x6a,0x51,0x7f
+
+# ATT: vcvttph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+# INTEL: vcvttph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+0x62,0xf5,0x7c,0xbf,0x6a,0x52,0x80
+
+# ATT: vcvttph2iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7c,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7c,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%eax){1to32}, %zmm2
+# INTEL: vcvttph2iubs zmm2, word ptr [eax]{1to32}
+0x62,0xf5,0x7c,0x58,0x6a,0x10
+
+# ATT: vcvttph2iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7c,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7c,0xcf,0x6a,0x51,0x7f
+
+# ATT: vcvttph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+# INTEL: vcvttph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+0x62,0xf5,0x7c,0xdf,0x6a,0x52,0x80
+
+# ATT: vcvttps2ibs %xmm3, %xmm2
+# INTEL: vcvttps2ibs xmm2, xmm3
+0x62,0xf5,0x7d,0x08,0x68,0xd3
+
+# ATT: vcvttps2ibs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttps2ibs xmm2 {k7}, xmm3
+0x62,0xf5,0x7d,0x0f,0x68,0xd3
+
+# ATT: vcvttps2ibs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttps2ibs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7d,0x8f,0x68,0xd3
+
+# ATT: vcvttps2ibs %zmm3, %zmm2
+# INTEL: vcvttps2ibs zmm2, zmm3
+0x62,0xf5,0x7d,0x48,0x68,0xd3
+
+# ATT: vcvttps2ibs {sae}, %zmm3, %zmm2
+# INTEL: vcvttps2ibs zmm2, zmm3, {sae}
+0x62,0xf5,0x7d,0x18,0x68,0xd3
+
+# ATT: vcvttps2ibs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttps2ibs zmm2 {k7}, zmm3
+0x62,0xf5,0x7d,0x4f,0x68,0xd3
+
+# ATT: vcvttps2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttps2ibs zmm2 {k7} {z}, zmm3, {sae}
+0x62,0xf5,0x7d,0x9f,0x68,0xd3
+
+# ATT: vcvttps2ibs %ymm3, %ymm2
+# INTEL: vcvttps2ibs ymm2, ymm3
+0x62,0xf5,0x7d,0x28,0x68,0xd3
+
+# ATT: vcvttps2ibs {sae}, %ymm3, %ymm2
+# INTEL: vcvttps2ibs ymm2, ymm3, {sae}
+0x62,0xf5,0x79,0x18,0x68,0xd3
+
+# ATT: vcvttps2ibs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttps2ibs ymm2 {k7}, ymm3
+0x62,0xf5,0x7d,0x2f,0x68,0xd3
+
+# ATT: vcvttps2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttps2ibs ymm2 {k7} {z}, ymm3, {sae}
+0x62,0xf5,0x79,0x9f,0x68,0xd3
+
+# ATT: vcvttps2ibs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%eax){1to4}, %xmm2
+# INTEL: vcvttps2ibs xmm2, dword ptr [eax]{1to4}
+0x62,0xf5,0x7d,0x18,0x68,0x10
+
+# ATT: vcvttps2ibs -512(,%ebp,2), %xmm2
+# INTEL: vcvttps2ibs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7d,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7d,0x8f,0x68,0x51,0x7f
+
+# ATT: vcvttps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+# INTEL: vcvttps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+0x62,0xf5,0x7d,0x9f,0x68,0x52,0x80
+
+# ATT: vcvttps2ibs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%eax){1to8}, %ymm2
+# INTEL: vcvttps2ibs ymm2, dword ptr [eax]{1to8}
+0x62,0xf5,0x7d,0x38,0x68,0x10
+
+# ATT: vcvttps2ibs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7d,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7d,0xaf,0x68,0x51,0x7f
+
+# ATT: vcvttps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+# INTEL: vcvttps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+0x62,0xf5,0x7d,0xbf,0x68,0x52,0x80
+
+# ATT: vcvttps2ibs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%eax){1to16}, %zmm2
+# INTEL: vcvttps2ibs zmm2, dword ptr [eax]{1to16}
+0x62,0xf5,0x7d,0x58,0x68,0x10
+
+# ATT: vcvttps2ibs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7d,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7d,0xcf,0x68,0x51,0x7f
+
+# ATT: vcvttps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+# INTEL: vcvttps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+0x62,0xf5,0x7d,0xdf,0x68,0x52,0x80
+
+# ATT: vcvttps2iubs %xmm3, %xmm2
+# INTEL: vcvttps2iubs xmm2, xmm3
+0x62,0xf5,0x7d,0x08,0x6a,0xd3
+
+# ATT: vcvttps2iubs %xmm3, %xmm2 {%k7}
+# INTEL: vcvttps2iubs xmm2 {k7}, xmm3
+0x62,0xf5,0x7d,0x0f,0x6a,0xd3
+
+# ATT: vcvttps2iubs %xmm3, %xmm2 {%k7} {z}
+# INTEL: vcvttps2iubs xmm2 {k7} {z}, xmm3
+0x62,0xf5,0x7d,0x8f,0x6a,0xd3
+
+# ATT: vcvttps2iubs %zmm3, %zmm2
+# INTEL: vcvttps2iubs zmm2, zmm3
+0x62,0xf5,0x7d,0x48,0x6a,0xd3
+
+# ATT: vcvttps2iubs {sae}, %zmm3, %zmm2
+# INTEL: vcvttps2iubs zmm2, zmm3, {sae}
+0x62,0xf5,0x7d,0x18,0x6a,0xd3
+
+# ATT: vcvttps2iubs %zmm3, %zmm2 {%k7}
+# INTEL: vcvttps2iubs zmm2 {k7}, zmm3
+0x62,0xf5,0x7d,0x4f,0x6a,0xd3
+
+# ATT: vcvttps2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+# INTEL: vcvttps2iubs zmm2 {k7} {z}, zmm3, {sae}
+0x62,0xf5,0x7d,0x9f,0x6a,0xd3
+
+# ATT: vcvttps2iubs %ymm3, %ymm2
+# INTEL: vcvttps2iubs ymm2, ymm3
+0x62,0xf5,0x7d,0x28,0x6a,0xd3
+
+# ATT: vcvttps2iubs {sae}, %ymm3, %ymm2
+# INTEL: vcvttps2iubs ymm2, ymm3, {sae}
+0x62,0xf5,0x79,0x18,0x6a,0xd3
+
+# ATT: vcvttps2iubs %ymm3, %ymm2 {%k7}
+# INTEL: vcvttps2iubs ymm2 {k7}, ymm3
+0x62,0xf5,0x7d,0x2f,0x6a,0xd3
+
+# ATT: vcvttps2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+# INTEL: vcvttps2iubs ymm2 {k7} {z}, ymm3, {sae}
+0x62,0xf5,0x79,0x9f,0x6a,0xd3
+
+# ATT: vcvttps2iubs 268435456(%esp,%esi,8), %xmm2
+# INTEL: vcvttps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+# INTEL: vcvttps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%eax){1to4}, %xmm2
+# INTEL: vcvttps2iubs xmm2, dword ptr [eax]{1to4}
+0x62,0xf5,0x7d,0x18,0x6a,0x10
+
+# ATT: vcvttps2iubs -512(,%ebp,2), %xmm2
+# INTEL: vcvttps2iubs xmm2, xmmword ptr [2*ebp - 512]
+0x62,0xf5,0x7d,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+# INTEL: vcvttps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+0x62,0xf5,0x7d,0x8f,0x6a,0x51,0x7f
+
+# ATT: vcvttps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+# INTEL: vcvttps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+0x62,0xf5,0x7d,0x9f,0x6a,0x52,0x80
+
+# ATT: vcvttps2iubs 268435456(%esp,%esi,8), %ymm2
+# INTEL: vcvttps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+# INTEL: vcvttps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%eax){1to8}, %ymm2
+# INTEL: vcvttps2iubs ymm2, dword ptr [eax]{1to8}
+0x62,0xf5,0x7d,0x38,0x6a,0x10
+
+# ATT: vcvttps2iubs -1024(,%ebp,2), %ymm2
+# INTEL: vcvttps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+0x62,0xf5,0x7d,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+# INTEL: vcvttps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+0x62,0xf5,0x7d,0xaf,0x6a,0x51,0x7f
+
+# ATT: vcvttps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+# INTEL: vcvttps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+0x62,0xf5,0x7d,0xbf,0x6a,0x52,0x80
+
+# ATT: vcvttps2iubs 268435456(%esp,%esi,8), %zmm2
+# INTEL: vcvttps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+0x62,0xf5,0x7d,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+# INTEL: vcvttps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+0x62,0xf5,0x7d,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%eax){1to16}, %zmm2
+# INTEL: vcvttps2iubs zmm2, dword ptr [eax]{1to16}
+0x62,0xf5,0x7d,0x58,0x6a,0x10
+
+# ATT: vcvttps2iubs -2048(,%ebp,2), %zmm2
+# INTEL: vcvttps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+0x62,0xf5,0x7d,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+# INTEL: vcvttps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+0x62,0xf5,0x7d,0xcf,0x6a,0x51,0x7f
+
+# ATT: vcvttps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+# INTEL: vcvttps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+0x62,0xf5,0x7d,0xdf,0x6a,0x52,0x80
+
diff --git a/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-64.txt b/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-64.txt
new file mode 100644
index 0000000..fc9ac1c
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/avx10.2-satcvt-64.txt
@@ -0,0 +1,1363 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s --check-prefixes=ATT
+# RUN: llvm-mc --disassemble %s -triple=x86_64 --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL
+
+# ATT: vcvtnebf162ibs %xmm23, %xmm22
+# INTEL: vcvtnebf162ibs xmm22, xmm23
+0x62,0xa5,0x7f,0x08,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtnebf162ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7f,0x0f,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7f,0x8f,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %zmm23, %zmm22
+# INTEL: vcvtnebf162ibs zmm22, zmm23
+0x62,0xa5,0x7f,0x48,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtnebf162ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7f,0x4f,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm22 {k7} {z}, zmm23
+0x62,0xa5,0x7f,0xcf,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %ymm23, %ymm22
+# INTEL: vcvtnebf162ibs ymm22, ymm23
+0x62,0xa5,0x7f,0x28,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtnebf162ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7f,0x2f,0x69,0xf7
+
+# ATT: vcvtnebf162ibs %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm22 {k7} {z}, ymm23
+0x62,0xa5,0x7f,0xaf,0x69,0xf7
+
+# ATT: vcvtnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%rip){1to8}, %xmm22
+# INTEL: vcvtnebf162ibs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7f,0x18,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvtnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7f,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7f,0x8f,0x69,0x71,0x7f
+
+# ATT: vcvtnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7f,0x9f,0x69,0x72,0x80
+
+# ATT: vcvtnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%rip){1to16}, %ymm22
+# INTEL: vcvtnebf162ibs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7f,0x38,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7f,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7f,0xaf,0x69,0x71,0x7f
+
+# ATT: vcvtnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7f,0xbf,0x69,0x72,0x80
+
+# ATT: vcvtnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162ibs (%rip){1to32}, %zmm22
+# INTEL: vcvtnebf162ibs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7f,0x58,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7f,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7f,0xcf,0x69,0x71,0x7f
+
+# ATT: vcvtnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7f,0xdf,0x69,0x72,0x80
+
+# ATT: vcvtnebf162iubs %xmm23, %xmm22
+# INTEL: vcvtnebf162iubs xmm22, xmm23
+0x62,0xa5,0x7f,0x08,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtnebf162iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7f,0x0f,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7f,0x8f,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %zmm23, %zmm22
+# INTEL: vcvtnebf162iubs zmm22, zmm23
+0x62,0xa5,0x7f,0x48,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtnebf162iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7f,0x4f,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm22 {k7} {z}, zmm23
+0x62,0xa5,0x7f,0xcf,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %ymm23, %ymm22
+# INTEL: vcvtnebf162iubs ymm22, ymm23
+0x62,0xa5,0x7f,0x28,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtnebf162iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7f,0x2f,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm22 {k7} {z}, ymm23
+0x62,0xa5,0x7f,0xaf,0x6b,0xf7
+
+# ATT: vcvtnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%rip){1to8}, %xmm22
+# INTEL: vcvtnebf162iubs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7f,0x18,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvtnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7f,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7f,0x8f,0x6b,0x71,0x7f
+
+# ATT: vcvtnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7f,0x9f,0x6b,0x72,0x80
+
+# ATT: vcvtnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%rip){1to16}, %ymm22
+# INTEL: vcvtnebf162iubs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7f,0x38,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7f,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7f,0xaf,0x6b,0x71,0x7f
+
+# ATT: vcvtnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7f,0xbf,0x6b,0x72,0x80
+
+# ATT: vcvtnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtnebf162iubs (%rip){1to32}, %zmm22
+# INTEL: vcvtnebf162iubs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7f,0x58,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtnebf162iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7f,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7f,0xcf,0x6b,0x71,0x7f
+
+# ATT: vcvtnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvtnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7f,0xdf,0x6b,0x72,0x80
+
+# ATT: vcvtph2ibs %xmm23, %xmm22
+# INTEL: vcvtph2ibs xmm22, xmm23
+0x62,0xa5,0x7c,0x08,0x69,0xf7
+
+# ATT: vcvtph2ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtph2ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7c,0x0f,0x69,0xf7
+
+# ATT: vcvtph2ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtph2ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7c,0x8f,0x69,0xf7
+
+# ATT: vcvtph2ibs %zmm23, %zmm22
+# INTEL: vcvtph2ibs zmm22, zmm23
+0x62,0xa5,0x7c,0x48,0x69,0xf7
+
+# ATT: vcvtph2ibs {rn-sae}, %zmm23, %zmm22
+# INTEL: vcvtph2ibs zmm22, zmm23, {rn-sae}
+0x62,0xa5,0x7c,0x18,0x69,0xf7
+
+# ATT: vcvtph2ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtph2ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7c,0x4f,0x69,0xf7
+
+# ATT: vcvtph2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtph2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+0x62,0xa5,0x7c,0xff,0x69,0xf7
+
+# ATT: vcvtph2ibs %ymm23, %ymm22
+# INTEL: vcvtph2ibs ymm22, ymm23
+0x62,0xa5,0x7c,0x28,0x69,0xf7
+
+# ATT: vcvtph2ibs {rn-sae}, %ymm23, %ymm22
+# INTEL: vcvtph2ibs ymm22, ymm23, {rn-sae}
+0x62,0xa5,0x78,0x18,0x69,0xf7
+
+# ATT: vcvtph2ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtph2ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7c,0x2f,0x69,0xf7
+
+# ATT: vcvtph2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtph2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+0x62,0xa5,0x78,0xff,0x69,0xf7
+
+# ATT: vcvtph2ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%rip){1to8}, %xmm22
+# INTEL: vcvtph2ibs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7c,0x18,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvtph2ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7c,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7c,0x8f,0x69,0x71,0x7f
+
+# ATT: vcvtph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvtph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7c,0x9f,0x69,0x72,0x80
+
+# ATT: vcvtph2ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%rip){1to16}, %ymm22
+# INTEL: vcvtph2ibs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7c,0x38,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7c,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7c,0xaf,0x69,0x71,0x7f
+
+# ATT: vcvtph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvtph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7c,0xbf,0x69,0x72,0x80
+
+# ATT: vcvtph2ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2ibs (%rip){1to32}, %zmm22
+# INTEL: vcvtph2ibs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7c,0x58,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7c,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7c,0xcf,0x69,0x71,0x7f
+
+# ATT: vcvtph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvtph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7c,0xdf,0x69,0x72,0x80
+
+# ATT: vcvtph2iubs %xmm23, %xmm22
+# INTEL: vcvtph2iubs xmm22, xmm23
+0x62,0xa5,0x7c,0x08,0x6b,0xf7
+
+# ATT: vcvtph2iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtph2iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7c,0x0f,0x6b,0xf7
+
+# ATT: vcvtph2iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtph2iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7c,0x8f,0x6b,0xf7
+
+# ATT: vcvtph2iubs %zmm23, %zmm22
+# INTEL: vcvtph2iubs zmm22, zmm23
+0x62,0xa5,0x7c,0x48,0x6b,0xf7
+
+# ATT: vcvtph2iubs {rn-sae}, %zmm23, %zmm22
+# INTEL: vcvtph2iubs zmm22, zmm23, {rn-sae}
+0x62,0xa5,0x7c,0x18,0x6b,0xf7
+
+# ATT: vcvtph2iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtph2iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7c,0x4f,0x6b,0xf7
+
+# ATT: vcvtph2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtph2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+0x62,0xa5,0x7c,0xff,0x6b,0xf7
+
+# ATT: vcvtph2iubs %ymm23, %ymm22
+# INTEL: vcvtph2iubs ymm22, ymm23
+0x62,0xa5,0x7c,0x28,0x6b,0xf7
+
+# ATT: vcvtph2iubs {rn-sae}, %ymm23, %ymm22
+# INTEL: vcvtph2iubs ymm22, ymm23, {rn-sae}
+0x62,0xa5,0x78,0x18,0x6b,0xf7
+
+# ATT: vcvtph2iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtph2iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7c,0x2f,0x6b,0xf7
+
+# ATT: vcvtph2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtph2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+0x62,0xa5,0x78,0xff,0x6b,0xf7
+
+# ATT: vcvtph2iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%rip){1to8}, %xmm22
+# INTEL: vcvtph2iubs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7c,0x18,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvtph2iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7c,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7c,0x8f,0x6b,0x71,0x7f
+
+# ATT: vcvtph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvtph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7c,0x9f,0x6b,0x72,0x80
+
+# ATT: vcvtph2iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%rip){1to16}, %ymm22
+# INTEL: vcvtph2iubs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7c,0x38,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7c,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7c,0xaf,0x6b,0x71,0x7f
+
+# ATT: vcvtph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvtph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7c,0xbf,0x6b,0x72,0x80
+
+# ATT: vcvtph2iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtph2iubs (%rip){1to32}, %zmm22
+# INTEL: vcvtph2iubs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7c,0x58,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtph2iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7c,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7c,0xcf,0x6b,0x71,0x7f
+
+# ATT: vcvtph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvtph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7c,0xdf,0x6b,0x72,0x80
+
+# ATT: vcvtps2ibs %xmm23, %xmm22
+# INTEL: vcvtps2ibs xmm22, xmm23
+0x62,0xa5,0x7d,0x08,0x69,0xf7
+
+# ATT: vcvtps2ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtps2ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7d,0x0f,0x69,0xf7
+
+# ATT: vcvtps2ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtps2ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7d,0x8f,0x69,0xf7
+
+# ATT: vcvtps2ibs %zmm23, %zmm22
+# INTEL: vcvtps2ibs zmm22, zmm23
+0x62,0xa5,0x7d,0x48,0x69,0xf7
+
+# ATT: vcvtps2ibs {rn-sae}, %zmm23, %zmm22
+# INTEL: vcvtps2ibs zmm22, zmm23, {rn-sae}
+0x62,0xa5,0x7d,0x18,0x69,0xf7
+
+# ATT: vcvtps2ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtps2ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7d,0x4f,0x69,0xf7
+
+# ATT: vcvtps2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtps2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+0x62,0xa5,0x7d,0xff,0x69,0xf7
+
+# ATT: vcvtps2ibs %ymm23, %ymm22
+# INTEL: vcvtps2ibs ymm22, ymm23
+0x62,0xa5,0x7d,0x28,0x69,0xf7
+
+# ATT: vcvtps2ibs {rn-sae}, %ymm23, %ymm22
+# INTEL: vcvtps2ibs ymm22, ymm23, {rn-sae}
+0x62,0xa5,0x79,0x18,0x69,0xf7
+
+# ATT: vcvtps2ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtps2ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7d,0x2f,0x69,0xf7
+
+# ATT: vcvtps2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtps2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+0x62,0xa5,0x79,0xff,0x69,0xf7
+
+# ATT: vcvtps2ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%rip){1to4}, %xmm22
+# INTEL: vcvtps2ibs xmm22, dword ptr [rip]{1to4}
+0x62,0xe5,0x7d,0x18,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvtps2ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7d,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7d,0x8f,0x69,0x71,0x7f
+
+# ATT: vcvtps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+# INTEL: vcvtps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+0x62,0xe5,0x7d,0x9f,0x69,0x72,0x80
+
+# ATT: vcvtps2ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%rip){1to8}, %ymm22
+# INTEL: vcvtps2ibs ymm22, dword ptr [rip]{1to8}
+0x62,0xe5,0x7d,0x38,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7d,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7d,0xaf,0x69,0x71,0x7f
+
+# ATT: vcvtps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+# INTEL: vcvtps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+0x62,0xe5,0x7d,0xbf,0x69,0x72,0x80
+
+# ATT: vcvtps2ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2ibs (%rip){1to16}, %zmm22
+# INTEL: vcvtps2ibs zmm22, dword ptr [rip]{1to16}
+0x62,0xe5,0x7d,0x58,0x69,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7d,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7d,0xcf,0x69,0x71,0x7f
+
+# ATT: vcvtps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+# INTEL: vcvtps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+0x62,0xe5,0x7d,0xdf,0x69,0x72,0x80
+
+# ATT: vcvtps2iubs %xmm23, %xmm22
+# INTEL: vcvtps2iubs xmm22, xmm23
+0x62,0xa5,0x7d,0x08,0x6b,0xf7
+
+# ATT: vcvtps2iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvtps2iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7d,0x0f,0x6b,0xf7
+
+# ATT: vcvtps2iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvtps2iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7d,0x8f,0x6b,0xf7
+
+# ATT: vcvtps2iubs %zmm23, %zmm22
+# INTEL: vcvtps2iubs zmm22, zmm23
+0x62,0xa5,0x7d,0x48,0x6b,0xf7
+
+# ATT: vcvtps2iubs {rn-sae}, %zmm23, %zmm22
+# INTEL: vcvtps2iubs zmm22, zmm23, {rn-sae}
+0x62,0xa5,0x7d,0x18,0x6b,0xf7
+
+# ATT: vcvtps2iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvtps2iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7d,0x4f,0x6b,0xf7
+
+# ATT: vcvtps2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvtps2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+0x62,0xa5,0x7d,0xff,0x6b,0xf7
+
+# ATT: vcvtps2iubs %ymm23, %ymm22
+# INTEL: vcvtps2iubs ymm22, ymm23
+0x62,0xa5,0x7d,0x28,0x6b,0xf7
+
+# ATT: vcvtps2iubs {rn-sae}, %ymm23, %ymm22
+# INTEL: vcvtps2iubs ymm22, ymm23, {rn-sae}
+0x62,0xa5,0x79,0x18,0x6b,0xf7
+
+# ATT: vcvtps2iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvtps2iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7d,0x2f,0x6b,0xf7
+
+# ATT: vcvtps2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvtps2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+0x62,0xa5,0x79,0xff,0x6b,0xf7
+
+# ATT: vcvtps2iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvtps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvtps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%rip){1to4}, %xmm22
+# INTEL: vcvtps2iubs xmm22, dword ptr [rip]{1to4}
+0x62,0xe5,0x7d,0x18,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvtps2iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7d,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvtps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvtps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7d,0x8f,0x6b,0x71,0x7f
+
+# ATT: vcvtps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+# INTEL: vcvtps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+0x62,0xe5,0x7d,0x9f,0x6b,0x72,0x80
+
+# ATT: vcvtps2iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvtps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvtps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%rip){1to8}, %ymm22
+# INTEL: vcvtps2iubs ymm22, dword ptr [rip]{1to8}
+0x62,0xe5,0x7d,0x38,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvtps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7d,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvtps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvtps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7d,0xaf,0x6b,0x71,0x7f
+
+# ATT: vcvtps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+# INTEL: vcvtps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+0x62,0xe5,0x7d,0xbf,0x6b,0x72,0x80
+
+# ATT: vcvtps2iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvtps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvtps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvtps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvtps2iubs (%rip){1to16}, %zmm22
+# INTEL: vcvtps2iubs zmm22, dword ptr [rip]{1to16}
+0x62,0xe5,0x7d,0x58,0x6b,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvtps2iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvtps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7d,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvtps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvtps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7d,0xcf,0x6b,0x71,0x7f
+
+# ATT: vcvtps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+# INTEL: vcvtps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+0x62,0xe5,0x7d,0xdf,0x6b,0x72,0x80
+
+# ATT: vcvttnebf162ibs %xmm23, %xmm22
+# INTEL: vcvttnebf162ibs xmm22, xmm23
+0x62,0xa5,0x7f,0x08,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttnebf162ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7f,0x0f,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7f,0x8f,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %zmm23, %zmm22
+# INTEL: vcvttnebf162ibs zmm22, zmm23
+0x62,0xa5,0x7f,0x48,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttnebf162ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7f,0x4f,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm22 {k7} {z}, zmm23
+0x62,0xa5,0x7f,0xcf,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %ymm23, %ymm22
+# INTEL: vcvttnebf162ibs ymm22, ymm23
+0x62,0xa5,0x7f,0x28,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttnebf162ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7f,0x2f,0x68,0xf7
+
+# ATT: vcvttnebf162ibs %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm22 {k7} {z}, ymm23
+0x62,0xa5,0x7f,0xaf,0x68,0xf7
+
+# ATT: vcvttnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%rip){1to8}, %xmm22
+# INTEL: vcvttnebf162ibs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7f,0x18,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvttnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7f,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7f,0x8f,0x68,0x71,0x7f
+
+# ATT: vcvttnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7f,0x9f,0x68,0x72,0x80
+
+# ATT: vcvttnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%rip){1to16}, %ymm22
+# INTEL: vcvttnebf162ibs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7f,0x38,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7f,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7f,0xaf,0x68,0x71,0x7f
+
+# ATT: vcvttnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7f,0xbf,0x68,0x72,0x80
+
+# ATT: vcvttnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162ibs (%rip){1to32}, %zmm22
+# INTEL: vcvttnebf162ibs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7f,0x58,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7f,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7f,0xcf,0x68,0x71,0x7f
+
+# ATT: vcvttnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7f,0xdf,0x68,0x72,0x80
+
+# ATT: vcvttnebf162iubs %xmm23, %xmm22
+# INTEL: vcvttnebf162iubs xmm22, xmm23
+0x62,0xa5,0x7f,0x08,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttnebf162iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7f,0x0f,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7f,0x8f,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %zmm23, %zmm22
+# INTEL: vcvttnebf162iubs zmm22, zmm23
+0x62,0xa5,0x7f,0x48,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttnebf162iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7f,0x4f,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm22 {k7} {z}, zmm23
+0x62,0xa5,0x7f,0xcf,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %ymm23, %ymm22
+# INTEL: vcvttnebf162iubs ymm22, ymm23
+0x62,0xa5,0x7f,0x28,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttnebf162iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7f,0x2f,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm22 {k7} {z}, ymm23
+0x62,0xa5,0x7f,0xaf,0x6a,0xf7
+
+# ATT: vcvttnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%rip){1to8}, %xmm22
+# INTEL: vcvttnebf162iubs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7f,0x18,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvttnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7f,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7f,0x8f,0x6a,0x71,0x7f
+
+# ATT: vcvttnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7f,0x9f,0x6a,0x72,0x80
+
+# ATT: vcvttnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%rip){1to16}, %ymm22
+# INTEL: vcvttnebf162iubs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7f,0x38,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7f,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7f,0xaf,0x6a,0x71,0x7f
+
+# ATT: vcvttnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7f,0xbf,0x6a,0x72,0x80
+
+# ATT: vcvttnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7f,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7f,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttnebf162iubs (%rip){1to32}, %zmm22
+# INTEL: vcvttnebf162iubs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7f,0x58,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttnebf162iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7f,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7f,0xcf,0x6a,0x71,0x7f
+
+# ATT: vcvttnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvttnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7f,0xdf,0x6a,0x72,0x80
+
+# ATT: vcvttph2ibs %xmm23, %xmm22
+# INTEL: vcvttph2ibs xmm22, xmm23
+0x62,0xa5,0x7c,0x08,0x68,0xf7
+
+# ATT: vcvttph2ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttph2ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7c,0x0f,0x68,0xf7
+
+# ATT: vcvttph2ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttph2ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7c,0x8f,0x68,0xf7
+
+# ATT: vcvttph2ibs %zmm23, %zmm22
+# INTEL: vcvttph2ibs zmm22, zmm23
+0x62,0xa5,0x7c,0x48,0x68,0xf7
+
+# ATT: vcvttph2ibs {sae}, %zmm23, %zmm22
+# INTEL: vcvttph2ibs zmm22, zmm23, {sae}
+0x62,0xa5,0x7c,0x18,0x68,0xf7
+
+# ATT: vcvttph2ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttph2ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7c,0x4f,0x68,0xf7
+
+# ATT: vcvttph2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttph2ibs zmm22 {k7} {z}, zmm23, {sae}
+0x62,0xa5,0x7c,0x9f,0x68,0xf7
+
+# ATT: vcvttph2ibs %ymm23, %ymm22
+# INTEL: vcvttph2ibs ymm22, ymm23
+0x62,0xa5,0x7c,0x28,0x68,0xf7
+
+# ATT: vcvttph2ibs {sae}, %ymm23, %ymm22
+# INTEL: vcvttph2ibs ymm22, ymm23, {sae}
+0x62,0xa5,0x78,0x18,0x68,0xf7
+
+# ATT: vcvttph2ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttph2ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7c,0x2f,0x68,0xf7
+
+# ATT: vcvttph2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttph2ibs ymm22 {k7} {z}, ymm23, {sae}
+0x62,0xa5,0x78,0x9f,0x68,0xf7
+
+# ATT: vcvttph2ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%rip){1to8}, %xmm22
+# INTEL: vcvttph2ibs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7c,0x18,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvttph2ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7c,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7c,0x8f,0x68,0x71,0x7f
+
+# ATT: vcvttph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvttph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7c,0x9f,0x68,0x72,0x80
+
+# ATT: vcvttph2ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%rip){1to16}, %ymm22
+# INTEL: vcvttph2ibs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7c,0x38,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7c,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7c,0xaf,0x68,0x71,0x7f
+
+# ATT: vcvttph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvttph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7c,0xbf,0x68,0x72,0x80
+
+# ATT: vcvttph2ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2ibs (%rip){1to32}, %zmm22
+# INTEL: vcvttph2ibs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7c,0x58,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7c,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7c,0xcf,0x68,0x71,0x7f
+
+# ATT: vcvttph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvttph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7c,0xdf,0x68,0x72,0x80
+
+# ATT: vcvttph2iubs %xmm23, %xmm22
+# INTEL: vcvttph2iubs xmm22, xmm23
+0x62,0xa5,0x7c,0x08,0x6a,0xf7
+
+# ATT: vcvttph2iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttph2iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7c,0x0f,0x6a,0xf7
+
+# ATT: vcvttph2iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttph2iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7c,0x8f,0x6a,0xf7
+
+# ATT: vcvttph2iubs %zmm23, %zmm22
+# INTEL: vcvttph2iubs zmm22, zmm23
+0x62,0xa5,0x7c,0x48,0x6a,0xf7
+
+# ATT: vcvttph2iubs {sae}, %zmm23, %zmm22
+# INTEL: vcvttph2iubs zmm22, zmm23, {sae}
+0x62,0xa5,0x7c,0x18,0x6a,0xf7
+
+# ATT: vcvttph2iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttph2iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7c,0x4f,0x6a,0xf7
+
+# ATT: vcvttph2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttph2iubs zmm22 {k7} {z}, zmm23, {sae}
+0x62,0xa5,0x7c,0x9f,0x6a,0xf7
+
+# ATT: vcvttph2iubs %ymm23, %ymm22
+# INTEL: vcvttph2iubs ymm22, ymm23
+0x62,0xa5,0x7c,0x28,0x6a,0xf7
+
+# ATT: vcvttph2iubs {sae}, %ymm23, %ymm22
+# INTEL: vcvttph2iubs ymm22, ymm23, {sae}
+0x62,0xa5,0x78,0x18,0x6a,0xf7
+
+# ATT: vcvttph2iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttph2iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7c,0x2f,0x6a,0xf7
+
+# ATT: vcvttph2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttph2iubs ymm22 {k7} {z}, ymm23, {sae}
+0x62,0xa5,0x78,0x9f,0x6a,0xf7
+
+# ATT: vcvttph2iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%rip){1to8}, %xmm22
+# INTEL: vcvttph2iubs xmm22, word ptr [rip]{1to8}
+0x62,0xe5,0x7c,0x18,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvttph2iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7c,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7c,0x8f,0x6a,0x71,0x7f
+
+# ATT: vcvttph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+# INTEL: vcvttph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+0x62,0xe5,0x7c,0x9f,0x6a,0x72,0x80
+
+# ATT: vcvttph2iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%rip){1to16}, %ymm22
+# INTEL: vcvttph2iubs ymm22, word ptr [rip]{1to16}
+0x62,0xe5,0x7c,0x38,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7c,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7c,0xaf,0x6a,0x71,0x7f
+
+# ATT: vcvttph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+# INTEL: vcvttph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+0x62,0xe5,0x7c,0xbf,0x6a,0x72,0x80
+
+# ATT: vcvttph2iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7c,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7c,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttph2iubs (%rip){1to32}, %zmm22
+# INTEL: vcvttph2iubs zmm22, word ptr [rip]{1to32}
+0x62,0xe5,0x7c,0x58,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttph2iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7c,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7c,0xcf,0x6a,0x71,0x7f
+
+# ATT: vcvttph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+# INTEL: vcvttph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+0x62,0xe5,0x7c,0xdf,0x6a,0x72,0x80
+
+# ATT: vcvttps2ibs %xmm23, %xmm22
+# INTEL: vcvttps2ibs xmm22, xmm23
+0x62,0xa5,0x7d,0x08,0x68,0xf7
+
+# ATT: vcvttps2ibs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttps2ibs xmm22 {k7}, xmm23
+0x62,0xa5,0x7d,0x0f,0x68,0xf7
+
+# ATT: vcvttps2ibs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttps2ibs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7d,0x8f,0x68,0xf7
+
+# ATT: vcvttps2ibs %zmm23, %zmm22
+# INTEL: vcvttps2ibs zmm22, zmm23
+0x62,0xa5,0x7d,0x48,0x68,0xf7
+
+# ATT: vcvttps2ibs {sae}, %zmm23, %zmm22
+# INTEL: vcvttps2ibs zmm22, zmm23, {sae}
+0x62,0xa5,0x7d,0x18,0x68,0xf7
+
+# ATT: vcvttps2ibs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttps2ibs zmm22 {k7}, zmm23
+0x62,0xa5,0x7d,0x4f,0x68,0xf7
+
+# ATT: vcvttps2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttps2ibs zmm22 {k7} {z}, zmm23, {sae}
+0x62,0xa5,0x7d,0x9f,0x68,0xf7
+
+# ATT: vcvttps2ibs %ymm23, %ymm22
+# INTEL: vcvttps2ibs ymm22, ymm23
+0x62,0xa5,0x7d,0x28,0x68,0xf7
+
+# ATT: vcvttps2ibs {sae}, %ymm23, %ymm22
+# INTEL: vcvttps2ibs ymm22, ymm23, {sae}
+0x62,0xa5,0x79,0x18,0x68,0xf7
+
+# ATT: vcvttps2ibs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttps2ibs ymm22 {k7}, ymm23
+0x62,0xa5,0x7d,0x2f,0x68,0xf7
+
+# ATT: vcvttps2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttps2ibs ymm22 {k7} {z}, ymm23, {sae}
+0x62,0xa5,0x79,0x9f,0x68,0xf7
+
+# ATT: vcvttps2ibs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%rip){1to4}, %xmm22
+# INTEL: vcvttps2ibs xmm22, dword ptr [rip]{1to4}
+0x62,0xe5,0x7d,0x18,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2ibs -512(,%rbp,2), %xmm22
+# INTEL: vcvttps2ibs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7d,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7d,0x8f,0x68,0x71,0x7f
+
+# ATT: vcvttps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+# INTEL: vcvttps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+0x62,0xe5,0x7d,0x9f,0x68,0x72,0x80
+
+# ATT: vcvttps2ibs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%rip){1to8}, %ymm22
+# INTEL: vcvttps2ibs ymm22, dword ptr [rip]{1to8}
+0x62,0xe5,0x7d,0x38,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2ibs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7d,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7d,0xaf,0x68,0x71,0x7f
+
+# ATT: vcvttps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+# INTEL: vcvttps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+0x62,0xe5,0x7d,0xbf,0x68,0x72,0x80
+
+# ATT: vcvttps2ibs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2ibs (%rip){1to16}, %zmm22
+# INTEL: vcvttps2ibs zmm22, dword ptr [rip]{1to16}
+0x62,0xe5,0x7d,0x58,0x68,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2ibs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7d,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7d,0xcf,0x68,0x71,0x7f
+
+# ATT: vcvttps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+# INTEL: vcvttps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+0x62,0xe5,0x7d,0xdf,0x68,0x72,0x80
+
+# ATT: vcvttps2iubs %xmm23, %xmm22
+# INTEL: vcvttps2iubs xmm22, xmm23
+0x62,0xa5,0x7d,0x08,0x6a,0xf7
+
+# ATT: vcvttps2iubs %xmm23, %xmm22 {%k7}
+# INTEL: vcvttps2iubs xmm22 {k7}, xmm23
+0x62,0xa5,0x7d,0x0f,0x6a,0xf7
+
+# ATT: vcvttps2iubs %xmm23, %xmm22 {%k7} {z}
+# INTEL: vcvttps2iubs xmm22 {k7} {z}, xmm23
+0x62,0xa5,0x7d,0x8f,0x6a,0xf7
+
+# ATT: vcvttps2iubs %zmm23, %zmm22
+# INTEL: vcvttps2iubs zmm22, zmm23
+0x62,0xa5,0x7d,0x48,0x6a,0xf7
+
+# ATT: vcvttps2iubs {sae}, %zmm23, %zmm22
+# INTEL: vcvttps2iubs zmm22, zmm23, {sae}
+0x62,0xa5,0x7d,0x18,0x6a,0xf7
+
+# ATT: vcvttps2iubs %zmm23, %zmm22 {%k7}
+# INTEL: vcvttps2iubs zmm22 {k7}, zmm23
+0x62,0xa5,0x7d,0x4f,0x6a,0xf7
+
+# ATT: vcvttps2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+# INTEL: vcvttps2iubs zmm22 {k7} {z}, zmm23, {sae}
+0x62,0xa5,0x7d,0x9f,0x6a,0xf7
+
+# ATT: vcvttps2iubs %ymm23, %ymm22
+# INTEL: vcvttps2iubs ymm22, ymm23
+0x62,0xa5,0x7d,0x28,0x6a,0xf7
+
+# ATT: vcvttps2iubs {sae}, %ymm23, %ymm22
+# INTEL: vcvttps2iubs ymm22, ymm23, {sae}
+0x62,0xa5,0x79,0x18,0x6a,0xf7
+
+# ATT: vcvttps2iubs %ymm23, %ymm22 {%k7}
+# INTEL: vcvttps2iubs ymm22 {k7}, ymm23
+0x62,0xa5,0x7d,0x2f,0x6a,0xf7
+
+# ATT: vcvttps2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+# INTEL: vcvttps2iubs ymm22 {k7} {z}, ymm23, {sae}
+0x62,0xa5,0x79,0x9f,0x6a,0xf7
+
+# ATT: vcvttps2iubs 268435456(%rbp,%r14,8), %xmm22
+# INTEL: vcvttps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+# INTEL: vcvttps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%rip){1to4}, %xmm22
+# INTEL: vcvttps2iubs xmm22, dword ptr [rip]{1to4}
+0x62,0xe5,0x7d,0x18,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2iubs -512(,%rbp,2), %xmm22
+# INTEL: vcvttps2iubs xmm22, xmmword ptr [2*rbp - 512]
+0x62,0xe5,0x7d,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff
+
+# ATT: vcvttps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+# INTEL: vcvttps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+0x62,0xe5,0x7d,0x8f,0x6a,0x71,0x7f
+
+# ATT: vcvttps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+# INTEL: vcvttps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+0x62,0xe5,0x7d,0x9f,0x6a,0x72,0x80
+
+# ATT: vcvttps2iubs 268435456(%rbp,%r14,8), %ymm22
+# INTEL: vcvttps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+# INTEL: vcvttps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%rip){1to8}, %ymm22
+# INTEL: vcvttps2iubs ymm22, dword ptr [rip]{1to8}
+0x62,0xe5,0x7d,0x38,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2iubs -1024(,%rbp,2), %ymm22
+# INTEL: vcvttps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+0x62,0xe5,0x7d,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff
+
+# ATT: vcvttps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+# INTEL: vcvttps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+0x62,0xe5,0x7d,0xaf,0x6a,0x71,0x7f
+
+# ATT: vcvttps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+# INTEL: vcvttps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+0x62,0xe5,0x7d,0xbf,0x6a,0x72,0x80
+
+# ATT: vcvttps2iubs 268435456(%rbp,%r14,8), %zmm22
+# INTEL: vcvttps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+0x62,0xa5,0x7d,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10
+
+# ATT: vcvttps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+# INTEL: vcvttps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+0x62,0xc5,0x7d,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00
+
+# ATT: vcvttps2iubs (%rip){1to16}, %zmm22
+# INTEL: vcvttps2iubs zmm22, dword ptr [rip]{1to16}
+0x62,0xe5,0x7d,0x58,0x6a,0x35,0x00,0x00,0x00,0x00
+
+# ATT: vcvttps2iubs -2048(,%rbp,2), %zmm22
+# INTEL: vcvttps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+0x62,0xe5,0x7d,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff
+
+# ATT: vcvttps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+# INTEL: vcvttps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+0x62,0xe5,0x7d,0xcf,0x6a,0x71,0x7f
+
+# ATT: vcvttps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+# INTEL: vcvttps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+0x62,0xe5,0x7d,0xdf,0x6a,0x72,0x80
+
diff --git a/llvm/test/MC/X86/avx10.2satcvt-32-att.s b/llvm/test/MC/X86/avx10.2satcvt-32-att.s
new file mode 100644
index 0000000..b69b850
--- /dev/null
+++ b/llvm/test/MC/X86/avx10.2satcvt-32-att.s
@@ -0,0 +1,1362 @@
+// RUN: llvm-mc -triple i386 --show-encoding %s | FileCheck %s
+
+// CHECK: vcvtnebf162ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0xd3]
+ vcvtnebf162ibs %xmm3, %xmm2
+
+// CHECK: vcvtnebf162ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x69,0xd3]
+ vcvtnebf162ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtnebf162ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x69,0xd3]
+ vcvtnebf162ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0xd3]
+ vcvtnebf162ibs %zmm3, %zmm2
+
+// CHECK: vcvtnebf162ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x69,0xd3]
+ vcvtnebf162ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtnebf162ibs %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x69,0xd3]
+ vcvtnebf162ibs %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0xd3]
+ vcvtnebf162ibs %ymm3, %ymm2
+
+// CHECK: vcvtnebf162ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x69,0xd3]
+ vcvtnebf162ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtnebf162ibs %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x69,0xd3]
+ vcvtnebf162ibs %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtnebf162ibs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x69,0x10]
+ vcvtnebf162ibs (%eax){1to8}, %xmm2
+
+// CHECK: vcvtnebf162ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x69,0x51,0x7f]
+ vcvtnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x69,0x52,0x80]
+ vcvtnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtnebf162ibs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x69,0x10]
+ vcvtnebf162ibs (%eax){1to16}, %ymm2
+
+// CHECK: vcvtnebf162ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x69,0x51,0x7f]
+ vcvtnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x69,0x52,0x80]
+ vcvtnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtnebf162ibs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x69,0x10]
+ vcvtnebf162ibs (%eax){1to32}, %zmm2
+
+// CHECK: vcvtnebf162ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x69,0x51,0x7f]
+ vcvtnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x69,0x52,0x80]
+ vcvtnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0xd3]
+ vcvtnebf162iubs %xmm3, %xmm2
+
+// CHECK: vcvtnebf162iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6b,0xd3]
+ vcvtnebf162iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtnebf162iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6b,0xd3]
+ vcvtnebf162iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0xd3]
+ vcvtnebf162iubs %zmm3, %zmm2
+
+// CHECK: vcvtnebf162iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6b,0xd3]
+ vcvtnebf162iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtnebf162iubs %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6b,0xd3]
+ vcvtnebf162iubs %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0xd3]
+ vcvtnebf162iubs %ymm3, %ymm2
+
+// CHECK: vcvtnebf162iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6b,0xd3]
+ vcvtnebf162iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtnebf162iubs %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6b,0xd3]
+ vcvtnebf162iubs %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtnebf162iubs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6b,0x10]
+ vcvtnebf162iubs (%eax){1to8}, %xmm2
+
+// CHECK: vcvtnebf162iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6b,0x51,0x7f]
+ vcvtnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x6b,0x52,0x80]
+ vcvtnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtnebf162iubs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x6b,0x10]
+ vcvtnebf162iubs (%eax){1to16}, %ymm2
+
+// CHECK: vcvtnebf162iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6b,0x51,0x7f]
+ vcvtnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x6b,0x52,0x80]
+ vcvtnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtnebf162iubs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x6b,0x10]
+ vcvtnebf162iubs (%eax){1to32}, %zmm2
+
+// CHECK: vcvtnebf162iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6b,0x51,0x7f]
+ vcvtnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x6b,0x52,0x80]
+ vcvtnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0xd3]
+ vcvtph2ibs %xmm3, %xmm2
+
+// CHECK: vcvtph2ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x69,0xd3]
+ vcvtph2ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtph2ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x69,0xd3]
+ vcvtph2ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0xd3]
+ vcvtph2ibs %zmm3, %zmm2
+
+// CHECK: vcvtph2ibs {rn-sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x69,0xd3]
+ vcvtph2ibs {rn-sae}, %zmm3, %zmm2
+
+// CHECK: vcvtph2ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x69,0xd3]
+ vcvtph2ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtph2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xff,0x69,0xd3]
+ vcvtph2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0xd3]
+ vcvtph2ibs %ymm3, %ymm2
+
+// CHECK: vcvtph2ibs {rn-sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x69,0xd3]
+ vcvtph2ibs {rn-sae}, %ymm3, %ymm2
+
+// CHECK: vcvtph2ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x69,0xd3]
+ vcvtph2ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtph2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x78,0xff,0x69,0xd3]
+ vcvtph2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtph2ibs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x69,0x10]
+ vcvtph2ibs (%eax){1to8}, %xmm2
+
+// CHECK: vcvtph2ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x69,0x51,0x7f]
+ vcvtph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x69,0x52,0x80]
+ vcvtph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtph2ibs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x69,0x10]
+ vcvtph2ibs (%eax){1to16}, %ymm2
+
+// CHECK: vcvtph2ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x69,0x51,0x7f]
+ vcvtph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x69,0x52,0x80]
+ vcvtph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtph2ibs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x69,0x10]
+ vcvtph2ibs (%eax){1to32}, %zmm2
+
+// CHECK: vcvtph2ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x69,0x51,0x7f]
+ vcvtph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x69,0x52,0x80]
+ vcvtph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0xd3]
+ vcvtph2iubs %xmm3, %xmm2
+
+// CHECK: vcvtph2iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6b,0xd3]
+ vcvtph2iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtph2iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6b,0xd3]
+ vcvtph2iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0xd3]
+ vcvtph2iubs %zmm3, %zmm2
+
+// CHECK: vcvtph2iubs {rn-sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6b,0xd3]
+ vcvtph2iubs {rn-sae}, %zmm3, %zmm2
+
+// CHECK: vcvtph2iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6b,0xd3]
+ vcvtph2iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtph2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xff,0x6b,0xd3]
+ vcvtph2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0xd3]
+ vcvtph2iubs %ymm3, %ymm2
+
+// CHECK: vcvtph2iubs {rn-sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6b,0xd3]
+ vcvtph2iubs {rn-sae}, %ymm3, %ymm2
+
+// CHECK: vcvtph2iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6b,0xd3]
+ vcvtph2iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtph2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x78,0xff,0x6b,0xd3]
+ vcvtph2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtph2iubs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6b,0x10]
+ vcvtph2iubs (%eax){1to8}, %xmm2
+
+// CHECK: vcvtph2iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6b,0x51,0x7f]
+ vcvtph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6b,0x52,0x80]
+ vcvtph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtph2iubs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6b,0x10]
+ vcvtph2iubs (%eax){1to16}, %ymm2
+
+// CHECK: vcvtph2iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6b,0x51,0x7f]
+ vcvtph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6b,0x52,0x80]
+ vcvtph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtph2iubs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6b,0x10]
+ vcvtph2iubs (%eax){1to32}, %zmm2
+
+// CHECK: vcvtph2iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6b,0x51,0x7f]
+ vcvtph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6b,0x52,0x80]
+ vcvtph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0xd3]
+ vcvtps2ibs %xmm3, %xmm2
+
+// CHECK: vcvtps2ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x69,0xd3]
+ vcvtps2ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtps2ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x69,0xd3]
+ vcvtps2ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0xd3]
+ vcvtps2ibs %zmm3, %zmm2
+
+// CHECK: vcvtps2ibs {rn-sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x69,0xd3]
+ vcvtps2ibs {rn-sae}, %zmm3, %zmm2
+
+// CHECK: vcvtps2ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x69,0xd3]
+ vcvtps2ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtps2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xff,0x69,0xd3]
+ vcvtps2ibs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0xd3]
+ vcvtps2ibs %ymm3, %ymm2
+
+// CHECK: vcvtps2ibs {rn-sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x69,0xd3]
+ vcvtps2ibs {rn-sae}, %ymm3, %ymm2
+
+// CHECK: vcvtps2ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x69,0xd3]
+ vcvtps2ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtps2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x79,0xff,0x69,0xd3]
+ vcvtps2ibs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtps2ibs (%eax){1to4}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x69,0x10]
+ vcvtps2ibs (%eax){1to4}, %xmm2
+
+// CHECK: vcvtps2ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x69,0x51,0x7f]
+ vcvtps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x69,0x52,0x80]
+ vcvtps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtps2ibs (%eax){1to8}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x69,0x10]
+ vcvtps2ibs (%eax){1to8}, %ymm2
+
+// CHECK: vcvtps2ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x69,0x51,0x7f]
+ vcvtps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x69,0x52,0x80]
+ vcvtps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtps2ibs (%eax){1to16}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x69,0x10]
+ vcvtps2ibs (%eax){1to16}, %zmm2
+
+// CHECK: vcvtps2ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x69,0x51,0x7f]
+ vcvtps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x69,0x52,0x80]
+ vcvtps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0xd3]
+ vcvtps2iubs %xmm3, %xmm2
+
+// CHECK: vcvtps2iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6b,0xd3]
+ vcvtps2iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvtps2iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6b,0xd3]
+ vcvtps2iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0xd3]
+ vcvtps2iubs %zmm3, %zmm2
+
+// CHECK: vcvtps2iubs {rn-sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6b,0xd3]
+ vcvtps2iubs {rn-sae}, %zmm3, %zmm2
+
+// CHECK: vcvtps2iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6b,0xd3]
+ vcvtps2iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvtps2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xff,0x6b,0xd3]
+ vcvtps2iubs {rz-sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0xd3]
+ vcvtps2iubs %ymm3, %ymm2
+
+// CHECK: vcvtps2iubs {rn-sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6b,0xd3]
+ vcvtps2iubs {rn-sae}, %ymm3, %ymm2
+
+// CHECK: vcvtps2iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6b,0xd3]
+ vcvtps2iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvtps2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x79,0xff,0x6b,0xd3]
+ vcvtps2iubs {rz-sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvtps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvtps2iubs (%eax){1to4}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6b,0x10]
+ vcvtps2iubs (%eax){1to4}, %xmm2
+
+// CHECK: vcvtps2iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvtps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6b,0x51,0x7f]
+ vcvtps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6b,0x52,0x80]
+ vcvtps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvtps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvtps2iubs (%eax){1to8}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6b,0x10]
+ vcvtps2iubs (%eax){1to8}, %ymm2
+
+// CHECK: vcvtps2iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvtps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6b,0x51,0x7f]
+ vcvtps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6b,0x52,0x80]
+ vcvtps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvtps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvtps2iubs (%eax){1to16}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6b,0x10]
+ vcvtps2iubs (%eax){1to16}, %zmm2
+
+// CHECK: vcvtps2iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvtps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6b,0x51,0x7f]
+ vcvtps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6b,0x52,0x80]
+ vcvtps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0xd3]
+ vcvttnebf162ibs %xmm3, %xmm2
+
+// CHECK: vcvttnebf162ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x68,0xd3]
+ vcvttnebf162ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttnebf162ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x68,0xd3]
+ vcvttnebf162ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0xd3]
+ vcvttnebf162ibs %zmm3, %zmm2
+
+// CHECK: vcvttnebf162ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x68,0xd3]
+ vcvttnebf162ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttnebf162ibs %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x68,0xd3]
+ vcvttnebf162ibs %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0xd3]
+ vcvttnebf162ibs %ymm3, %ymm2
+
+// CHECK: vcvttnebf162ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x68,0xd3]
+ vcvttnebf162ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttnebf162ibs %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x68,0xd3]
+ vcvttnebf162ibs %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttnebf162ibs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x68,0x10]
+ vcvttnebf162ibs (%eax){1to8}, %xmm2
+
+// CHECK: vcvttnebf162ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x68,0x51,0x7f]
+ vcvttnebf162ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x68,0x52,0x80]
+ vcvttnebf162ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttnebf162ibs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x68,0x10]
+ vcvttnebf162ibs (%eax){1to16}, %ymm2
+
+// CHECK: vcvttnebf162ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x68,0x51,0x7f]
+ vcvttnebf162ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x68,0x52,0x80]
+ vcvttnebf162ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttnebf162ibs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x68,0x10]
+ vcvttnebf162ibs (%eax){1to32}, %zmm2
+
+// CHECK: vcvttnebf162ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x68,0x51,0x7f]
+ vcvttnebf162ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x68,0x52,0x80]
+ vcvttnebf162ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0xd3]
+ vcvttnebf162iubs %xmm3, %xmm2
+
+// CHECK: vcvttnebf162iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6a,0xd3]
+ vcvttnebf162iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttnebf162iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6a,0xd3]
+ vcvttnebf162iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0xd3]
+ vcvttnebf162iubs %zmm3, %zmm2
+
+// CHECK: vcvttnebf162iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6a,0xd3]
+ vcvttnebf162iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttnebf162iubs %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6a,0xd3]
+ vcvttnebf162iubs %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0xd3]
+ vcvttnebf162iubs %ymm3, %ymm2
+
+// CHECK: vcvttnebf162iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6a,0xd3]
+ vcvttnebf162iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttnebf162iubs %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6a,0xd3]
+ vcvttnebf162iubs %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttnebf162iubs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6a,0x10]
+ vcvttnebf162iubs (%eax){1to8}, %xmm2
+
+// CHECK: vcvttnebf162iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6a,0x51,0x7f]
+ vcvttnebf162iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x6a,0x52,0x80]
+ vcvttnebf162iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttnebf162iubs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x6a,0x10]
+ vcvttnebf162iubs (%eax){1to16}, %ymm2
+
+// CHECK: vcvttnebf162iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6a,0x51,0x7f]
+ vcvttnebf162iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x6a,0x52,0x80]
+ vcvttnebf162iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttnebf162iubs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x6a,0x10]
+ vcvttnebf162iubs (%eax){1to32}, %zmm2
+
+// CHECK: vcvttnebf162iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6a,0x51,0x7f]
+ vcvttnebf162iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x6a,0x52,0x80]
+ vcvttnebf162iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0xd3]
+ vcvttph2ibs %xmm3, %xmm2
+
+// CHECK: vcvttph2ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x68,0xd3]
+ vcvttph2ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttph2ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x68,0xd3]
+ vcvttph2ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0xd3]
+ vcvttph2ibs %zmm3, %zmm2
+
+// CHECK: vcvttph2ibs {sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x68,0xd3]
+ vcvttph2ibs {sae}, %zmm3, %zmm2
+
+// CHECK: vcvttph2ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x68,0xd3]
+ vcvttph2ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttph2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x68,0xd3]
+ vcvttph2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0xd3]
+ vcvttph2ibs %ymm3, %ymm2
+
+// CHECK: vcvttph2ibs {sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x68,0xd3]
+ vcvttph2ibs {sae}, %ymm3, %ymm2
+
+// CHECK: vcvttph2ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x68,0xd3]
+ vcvttph2ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttph2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x68,0xd3]
+ vcvttph2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttph2ibs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x68,0x10]
+ vcvttph2ibs (%eax){1to8}, %xmm2
+
+// CHECK: vcvttph2ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x68,0x51,0x7f]
+ vcvttph2ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x68,0x52,0x80]
+ vcvttph2ibs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttph2ibs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x68,0x10]
+ vcvttph2ibs (%eax){1to16}, %ymm2
+
+// CHECK: vcvttph2ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x68,0x51,0x7f]
+ vcvttph2ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x68,0x52,0x80]
+ vcvttph2ibs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttph2ibs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x68,0x10]
+ vcvttph2ibs (%eax){1to32}, %zmm2
+
+// CHECK: vcvttph2ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x68,0x51,0x7f]
+ vcvttph2ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x68,0x52,0x80]
+ vcvttph2ibs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0xd3]
+ vcvttph2iubs %xmm3, %xmm2
+
+// CHECK: vcvttph2iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6a,0xd3]
+ vcvttph2iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttph2iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6a,0xd3]
+ vcvttph2iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0xd3]
+ vcvttph2iubs %zmm3, %zmm2
+
+// CHECK: vcvttph2iubs {sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6a,0xd3]
+ vcvttph2iubs {sae}, %zmm3, %zmm2
+
+// CHECK: vcvttph2iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6a,0xd3]
+ vcvttph2iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttph2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6a,0xd3]
+ vcvttph2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0xd3]
+ vcvttph2iubs %ymm3, %ymm2
+
+// CHECK: vcvttph2iubs {sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6a,0xd3]
+ vcvttph2iubs {sae}, %ymm3, %ymm2
+
+// CHECK: vcvttph2iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6a,0xd3]
+ vcvttph2iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttph2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6a,0xd3]
+ vcvttph2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttph2iubs (%eax){1to8}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6a,0x10]
+ vcvttph2iubs (%eax){1to8}, %xmm2
+
+// CHECK: vcvttph2iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6a,0x51,0x7f]
+ vcvttph2iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6a,0x52,0x80]
+ vcvttph2iubs -256(%edx){1to8}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttph2iubs (%eax){1to16}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6a,0x10]
+ vcvttph2iubs (%eax){1to16}, %ymm2
+
+// CHECK: vcvttph2iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6a,0x51,0x7f]
+ vcvttph2iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6a,0x52,0x80]
+ vcvttph2iubs -256(%edx){1to16}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttph2iubs (%eax){1to32}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6a,0x10]
+ vcvttph2iubs (%eax){1to32}, %zmm2
+
+// CHECK: vcvttph2iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6a,0x51,0x7f]
+ vcvttph2iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6a,0x52,0x80]
+ vcvttph2iubs -256(%edx){1to32}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0xd3]
+ vcvttps2ibs %xmm3, %xmm2
+
+// CHECK: vcvttps2ibs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x68,0xd3]
+ vcvttps2ibs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttps2ibs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x68,0xd3]
+ vcvttps2ibs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0xd3]
+ vcvttps2ibs %zmm3, %zmm2
+
+// CHECK: vcvttps2ibs {sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x68,0xd3]
+ vcvttps2ibs {sae}, %zmm3, %zmm2
+
+// CHECK: vcvttps2ibs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x68,0xd3]
+ vcvttps2ibs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttps2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x68,0xd3]
+ vcvttps2ibs {sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0xd3]
+ vcvttps2ibs %ymm3, %ymm2
+
+// CHECK: vcvttps2ibs {sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x68,0xd3]
+ vcvttps2ibs {sae}, %ymm3, %ymm2
+
+// CHECK: vcvttps2ibs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x68,0xd3]
+ vcvttps2ibs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttps2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x68,0xd3]
+ vcvttps2ibs {sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttps2ibs (%eax){1to4}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x68,0x10]
+ vcvttps2ibs (%eax){1to4}, %xmm2
+
+// CHECK: vcvttps2ibs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2ibs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x68,0x51,0x7f]
+ vcvttps2ibs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x68,0x52,0x80]
+ vcvttps2ibs -512(%edx){1to4}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttps2ibs (%eax){1to8}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x68,0x10]
+ vcvttps2ibs (%eax){1to8}, %ymm2
+
+// CHECK: vcvttps2ibs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2ibs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x68,0x51,0x7f]
+ vcvttps2ibs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x68,0x52,0x80]
+ vcvttps2ibs -512(%edx){1to8}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttps2ibs (%eax){1to16}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x68,0x10]
+ vcvttps2ibs (%eax){1to16}, %zmm2
+
+// CHECK: vcvttps2ibs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2ibs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x68,0x51,0x7f]
+ vcvttps2ibs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x68,0x52,0x80]
+ vcvttps2ibs -512(%edx){1to16}, %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs %xmm3, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0xd3]
+ vcvttps2iubs %xmm3, %xmm2
+
+// CHECK: vcvttps2iubs %xmm3, %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6a,0xd3]
+ vcvttps2iubs %xmm3, %xmm2 {%k7}
+
+// CHECK: vcvttps2iubs %xmm3, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6a,0xd3]
+ vcvttps2iubs %xmm3, %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0xd3]
+ vcvttps2iubs %zmm3, %zmm2
+
+// CHECK: vcvttps2iubs {sae}, %zmm3, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6a,0xd3]
+ vcvttps2iubs {sae}, %zmm3, %zmm2
+
+// CHECK: vcvttps2iubs %zmm3, %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6a,0xd3]
+ vcvttps2iubs %zmm3, %zmm2 {%k7}
+
+// CHECK: vcvttps2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6a,0xd3]
+ vcvttps2iubs {sae}, %zmm3, %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0xd3]
+ vcvttps2iubs %ymm3, %ymm2
+
+// CHECK: vcvttps2iubs {sae}, %ymm3, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6a,0xd3]
+ vcvttps2iubs {sae}, %ymm3, %ymm2
+
+// CHECK: vcvttps2iubs %ymm3, %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6a,0xd3]
+ vcvttps2iubs %ymm3, %ymm2 {%k7}
+
+// CHECK: vcvttps2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6a,0xd3]
+ vcvttps2iubs {sae}, %ymm3, %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%esp,%esi,8), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%esp,%esi,8), %xmm2
+
+// CHECK: vcvttps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%edi,%eax,4), %xmm2 {%k7}
+
+// CHECK: vcvttps2iubs (%eax){1to4}, %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6a,0x10]
+ vcvttps2iubs (%eax){1to4}, %xmm2
+
+// CHECK: vcvttps2iubs -512(,%ebp,2), %xmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2iubs -512(,%ebp,2), %xmm2
+
+// CHECK: vcvttps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6a,0x51,0x7f]
+ vcvttps2iubs 2032(%ecx), %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6a,0x52,0x80]
+ vcvttps2iubs -512(%edx){1to4}, %xmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%esp,%esi,8), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%esp,%esi,8), %ymm2
+
+// CHECK: vcvttps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%edi,%eax,4), %ymm2 {%k7}
+
+// CHECK: vcvttps2iubs (%eax){1to8}, %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6a,0x10]
+ vcvttps2iubs (%eax){1to8}, %ymm2
+
+// CHECK: vcvttps2iubs -1024(,%ebp,2), %ymm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2iubs -1024(,%ebp,2), %ymm2
+
+// CHECK: vcvttps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6a,0x51,0x7f]
+ vcvttps2iubs 4064(%ecx), %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6a,0x52,0x80]
+ vcvttps2iubs -512(%edx){1to8}, %ymm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%esp,%esi,8), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%esp,%esi,8), %zmm2
+
+// CHECK: vcvttps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%edi,%eax,4), %zmm2 {%k7}
+
+// CHECK: vcvttps2iubs (%eax){1to16}, %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6a,0x10]
+ vcvttps2iubs (%eax){1to16}, %zmm2
+
+// CHECK: vcvttps2iubs -2048(,%ebp,2), %zmm2
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2iubs -2048(,%ebp,2), %zmm2
+
+// CHECK: vcvttps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6a,0x51,0x7f]
+ vcvttps2iubs 8128(%ecx), %zmm2 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6a,0x52,0x80]
+ vcvttps2iubs -512(%edx){1to16}, %zmm2 {%k7} {z}
+
diff --git a/llvm/test/MC/X86/avx10.2satcvt-32-intel.s b/llvm/test/MC/X86/avx10.2satcvt-32-intel.s
new file mode 100644
index 0000000..4c22544
--- /dev/null
+++ b/llvm/test/MC/X86/avx10.2satcvt-32-intel.s
@@ -0,0 +1,1362 @@
+// RUN: llvm-mc -triple i386 -x86-asm-syntax=intel -output-asm-variant=1 --show-encoding %s | FileCheck %s
+
+// CHECK: vcvtnebf162ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0xd3]
+ vcvtnebf162ibs xmm2, xmm3
+
+// CHECK: vcvtnebf162ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x69,0xd3]
+ vcvtnebf162ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvtnebf162ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x69,0xd3]
+ vcvtnebf162ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtnebf162ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0xd3]
+ vcvtnebf162ibs zmm2, zmm3
+
+// CHECK: vcvtnebf162ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x69,0xd3]
+ vcvtnebf162ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvtnebf162ibs zmm2 {k7} {z}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x69,0xd3]
+ vcvtnebf162ibs zmm2 {k7} {z}, zmm3
+
+// CHECK: vcvtnebf162ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0xd3]
+ vcvtnebf162ibs ymm2, ymm3
+
+// CHECK: vcvtnebf162ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x69,0xd3]
+ vcvtnebf162ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvtnebf162ibs ymm2 {k7} {z}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x69,0xd3]
+ vcvtnebf162ibs ymm2 {k7} {z}, ymm3
+
+// CHECK: vcvtnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162ibs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x69,0x10]
+ vcvtnebf162ibs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvtnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x69,0x51,0x7f]
+ vcvtnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x69,0x52,0x80]
+ vcvtnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvtnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162ibs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x69,0x10]
+ vcvtnebf162ibs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvtnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x69,0x51,0x7f]
+ vcvtnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x69,0x52,0x80]
+ vcvtnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvtnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162ibs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x69,0x10]
+ vcvtnebf162ibs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvtnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x69,0x51,0x7f]
+ vcvtnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x69,0x52,0x80]
+ vcvtnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvtnebf162iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0xd3]
+ vcvtnebf162iubs xmm2, xmm3
+
+// CHECK: vcvtnebf162iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6b,0xd3]
+ vcvtnebf162iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvtnebf162iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6b,0xd3]
+ vcvtnebf162iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtnebf162iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0xd3]
+ vcvtnebf162iubs zmm2, zmm3
+
+// CHECK: vcvtnebf162iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6b,0xd3]
+ vcvtnebf162iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvtnebf162iubs zmm2 {k7} {z}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6b,0xd3]
+ vcvtnebf162iubs zmm2 {k7} {z}, zmm3
+
+// CHECK: vcvtnebf162iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0xd3]
+ vcvtnebf162iubs ymm2, ymm3
+
+// CHECK: vcvtnebf162iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6b,0xd3]
+ vcvtnebf162iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvtnebf162iubs ymm2 {k7} {z}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6b,0xd3]
+ vcvtnebf162iubs ymm2 {k7} {z}, ymm3
+
+// CHECK: vcvtnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162iubs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6b,0x10]
+ vcvtnebf162iubs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvtnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6b,0x51,0x7f]
+ vcvtnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x6b,0x52,0x80]
+ vcvtnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvtnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162iubs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x6b,0x10]
+ vcvtnebf162iubs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvtnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6b,0x51,0x7f]
+ vcvtnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x6b,0x52,0x80]
+ vcvtnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvtnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtnebf162iubs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x6b,0x10]
+ vcvtnebf162iubs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvtnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6b,0x51,0x7f]
+ vcvtnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x6b,0x52,0x80]
+ vcvtnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvtph2ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0xd3]
+ vcvtph2ibs xmm2, xmm3
+
+// CHECK: vcvtph2ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x69,0xd3]
+ vcvtph2ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvtph2ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x69,0xd3]
+ vcvtph2ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtph2ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0xd3]
+ vcvtph2ibs zmm2, zmm3
+
+// CHECK: vcvtph2ibs zmm2, zmm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x69,0xd3]
+ vcvtph2ibs zmm2, zmm3, {rn-sae}
+
+// CHECK: vcvtph2ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x69,0xd3]
+ vcvtph2ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvtph2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xff,0x69,0xd3]
+ vcvtph2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+
+// CHECK: vcvtph2ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0xd3]
+ vcvtph2ibs ymm2, ymm3
+
+// CHECK: vcvtph2ibs ymm2, ymm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x69,0xd3]
+ vcvtph2ibs ymm2, ymm3, {rn-sae}
+
+// CHECK: vcvtph2ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x69,0xd3]
+ vcvtph2ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvtph2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0xff,0x69,0xd3]
+ vcvtph2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+
+// CHECK: vcvtph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2ibs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x69,0x10]
+ vcvtph2ibs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvtph2ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x69,0x51,0x7f]
+ vcvtph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x69,0x52,0x80]
+ vcvtph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvtph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2ibs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x69,0x10]
+ vcvtph2ibs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvtph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x69,0x51,0x7f]
+ vcvtph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x69,0x52,0x80]
+ vcvtph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvtph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2ibs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x69,0x10]
+ vcvtph2ibs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvtph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x69,0x51,0x7f]
+ vcvtph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x69,0x52,0x80]
+ vcvtph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvtph2iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0xd3]
+ vcvtph2iubs xmm2, xmm3
+
+// CHECK: vcvtph2iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6b,0xd3]
+ vcvtph2iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvtph2iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6b,0xd3]
+ vcvtph2iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtph2iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0xd3]
+ vcvtph2iubs zmm2, zmm3
+
+// CHECK: vcvtph2iubs zmm2, zmm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6b,0xd3]
+ vcvtph2iubs zmm2, zmm3, {rn-sae}
+
+// CHECK: vcvtph2iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6b,0xd3]
+ vcvtph2iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvtph2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xff,0x6b,0xd3]
+ vcvtph2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+
+// CHECK: vcvtph2iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0xd3]
+ vcvtph2iubs ymm2, ymm3
+
+// CHECK: vcvtph2iubs ymm2, ymm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6b,0xd3]
+ vcvtph2iubs ymm2, ymm3, {rn-sae}
+
+// CHECK: vcvtph2iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6b,0xd3]
+ vcvtph2iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvtph2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0xff,0x6b,0xd3]
+ vcvtph2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+
+// CHECK: vcvtph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2iubs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6b,0x10]
+ vcvtph2iubs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvtph2iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6b,0x51,0x7f]
+ vcvtph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6b,0x52,0x80]
+ vcvtph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvtph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2iubs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6b,0x10]
+ vcvtph2iubs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvtph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6b,0x51,0x7f]
+ vcvtph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6b,0x52,0x80]
+ vcvtph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvtph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtph2iubs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6b,0x10]
+ vcvtph2iubs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvtph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6b,0x51,0x7f]
+ vcvtph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6b,0x52,0x80]
+ vcvtph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvtps2ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0xd3]
+ vcvtps2ibs xmm2, xmm3
+
+// CHECK: vcvtps2ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x69,0xd3]
+ vcvtps2ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvtps2ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x69,0xd3]
+ vcvtps2ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtps2ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0xd3]
+ vcvtps2ibs zmm2, zmm3
+
+// CHECK: vcvtps2ibs zmm2, zmm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x69,0xd3]
+ vcvtps2ibs zmm2, zmm3, {rn-sae}
+
+// CHECK: vcvtps2ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x69,0xd3]
+ vcvtps2ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvtps2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xff,0x69,0xd3]
+ vcvtps2ibs zmm2 {k7} {z}, zmm3, {rz-sae}
+
+// CHECK: vcvtps2ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0xd3]
+ vcvtps2ibs ymm2, ymm3
+
+// CHECK: vcvtps2ibs ymm2, ymm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x69,0xd3]
+ vcvtps2ibs ymm2, ymm3, {rn-sae}
+
+// CHECK: vcvtps2ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x69,0xd3]
+ vcvtps2ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvtps2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0xff,0x69,0xd3]
+ vcvtps2ibs ymm2 {k7} {z}, ymm3, {rz-sae}
+
+// CHECK: vcvtps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2ibs xmm2, dword ptr [eax]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x69,0x10]
+ vcvtps2ibs xmm2, dword ptr [eax]{1to4}
+
+// CHECK: vcvtps2ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x69,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x69,0x51,0x7f]
+ vcvtps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x69,0x52,0x80]
+ vcvtps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+
+// CHECK: vcvtps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2ibs ymm2, dword ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x69,0x10]
+ vcvtps2ibs ymm2, dword ptr [eax]{1to8}
+
+// CHECK: vcvtps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x69,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x69,0x51,0x7f]
+ vcvtps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x69,0x52,0x80]
+ vcvtps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+
+// CHECK: vcvtps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x69,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2ibs zmm2, dword ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x69,0x10]
+ vcvtps2ibs zmm2, dword ptr [eax]{1to16}
+
+// CHECK: vcvtps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x69,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x69,0x51,0x7f]
+ vcvtps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x69,0x52,0x80]
+ vcvtps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+
+// CHECK: vcvtps2iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0xd3]
+ vcvtps2iubs xmm2, xmm3
+
+// CHECK: vcvtps2iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6b,0xd3]
+ vcvtps2iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvtps2iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6b,0xd3]
+ vcvtps2iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvtps2iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0xd3]
+ vcvtps2iubs zmm2, zmm3
+
+// CHECK: vcvtps2iubs zmm2, zmm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6b,0xd3]
+ vcvtps2iubs zmm2, zmm3, {rn-sae}
+
+// CHECK: vcvtps2iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6b,0xd3]
+ vcvtps2iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvtps2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xff,0x6b,0xd3]
+ vcvtps2iubs zmm2 {k7} {z}, zmm3, {rz-sae}
+
+// CHECK: vcvtps2iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0xd3]
+ vcvtps2iubs ymm2, ymm3
+
+// CHECK: vcvtps2iubs ymm2, ymm3, {rn-sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6b,0xd3]
+ vcvtps2iubs ymm2, ymm3, {rn-sae}
+
+// CHECK: vcvtps2iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6b,0xd3]
+ vcvtps2iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvtps2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0xff,0x6b,0xd3]
+ vcvtps2iubs ymm2 {k7} {z}, ymm3, {rz-sae}
+
+// CHECK: vcvtps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2iubs xmm2, dword ptr [eax]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6b,0x10]
+ vcvtps2iubs xmm2, dword ptr [eax]{1to4}
+
+// CHECK: vcvtps2iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6b,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvtps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6b,0x51,0x7f]
+ vcvtps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvtps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6b,0x52,0x80]
+ vcvtps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+
+// CHECK: vcvtps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2iubs ymm2, dword ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6b,0x10]
+ vcvtps2iubs ymm2, dword ptr [eax]{1to8}
+
+// CHECK: vcvtps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6b,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvtps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6b,0x51,0x7f]
+ vcvtps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvtps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6b,0x52,0x80]
+ vcvtps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+
+// CHECK: vcvtps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvtps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvtps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6b,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvtps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvtps2iubs zmm2, dword ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6b,0x10]
+ vcvtps2iubs zmm2, dword ptr [eax]{1to16}
+
+// CHECK: vcvtps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6b,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvtps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6b,0x51,0x7f]
+ vcvtps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvtps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6b,0x52,0x80]
+ vcvtps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+
+// CHECK: vcvttnebf162ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0xd3]
+ vcvttnebf162ibs xmm2, xmm3
+
+// CHECK: vcvttnebf162ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x68,0xd3]
+ vcvttnebf162ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvttnebf162ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x68,0xd3]
+ vcvttnebf162ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttnebf162ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0xd3]
+ vcvttnebf162ibs zmm2, zmm3
+
+// CHECK: vcvttnebf162ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x68,0xd3]
+ vcvttnebf162ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvttnebf162ibs zmm2 {k7} {z}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x68,0xd3]
+ vcvttnebf162ibs zmm2 {k7} {z}, zmm3
+
+// CHECK: vcvttnebf162ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0xd3]
+ vcvttnebf162ibs ymm2, ymm3
+
+// CHECK: vcvttnebf162ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x68,0xd3]
+ vcvttnebf162ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvttnebf162ibs ymm2 {k7} {z}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x68,0xd3]
+ vcvttnebf162ibs ymm2 {k7} {z}, ymm3
+
+// CHECK: vcvttnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162ibs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x68,0x10]
+ vcvttnebf162ibs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvttnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x68,0x51,0x7f]
+ vcvttnebf162ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x68,0x52,0x80]
+ vcvttnebf162ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvttnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162ibs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x68,0x10]
+ vcvttnebf162ibs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvttnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x68,0x51,0x7f]
+ vcvttnebf162ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x68,0x52,0x80]
+ vcvttnebf162ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvttnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162ibs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x68,0x10]
+ vcvttnebf162ibs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvttnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x68,0x51,0x7f]
+ vcvttnebf162ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x68,0x52,0x80]
+ vcvttnebf162ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvttnebf162iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0xd3]
+ vcvttnebf162iubs xmm2, xmm3
+
+// CHECK: vcvttnebf162iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6a,0xd3]
+ vcvttnebf162iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvttnebf162iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6a,0xd3]
+ vcvttnebf162iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttnebf162iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0xd3]
+ vcvttnebf162iubs zmm2, zmm3
+
+// CHECK: vcvttnebf162iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6a,0xd3]
+ vcvttnebf162iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvttnebf162iubs zmm2 {k7} {z}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6a,0xd3]
+ vcvttnebf162iubs zmm2 {k7} {z}, zmm3
+
+// CHECK: vcvttnebf162iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0xd3]
+ vcvttnebf162iubs ymm2, ymm3
+
+// CHECK: vcvttnebf162iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6a,0xd3]
+ vcvttnebf162iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvttnebf162iubs ymm2 {k7} {z}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6a,0xd3]
+ vcvttnebf162iubs ymm2 {k7} {z}, ymm3
+
+// CHECK: vcvttnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162iubs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6a,0x10]
+ vcvttnebf162iubs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvttnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x8f,0x6a,0x51,0x7f]
+ vcvttnebf162iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x9f,0x6a,0x52,0x80]
+ vcvttnebf162iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvttnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162iubs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x38,0x6a,0x10]
+ vcvttnebf162iubs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvttnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xaf,0x6a,0x51,0x7f]
+ vcvttnebf162iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xbf,0x6a,0x52,0x80]
+ vcvttnebf162iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvttnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttnebf162iubs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0x58,0x6a,0x10]
+ vcvttnebf162iubs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvttnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7f,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7f,0xcf,0x6a,0x51,0x7f]
+ vcvttnebf162iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7f,0xdf,0x6a,0x52,0x80]
+ vcvttnebf162iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvttph2ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0xd3]
+ vcvttph2ibs xmm2, xmm3
+
+// CHECK: vcvttph2ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x68,0xd3]
+ vcvttph2ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvttph2ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x68,0xd3]
+ vcvttph2ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttph2ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0xd3]
+ vcvttph2ibs zmm2, zmm3
+
+// CHECK: vcvttph2ibs zmm2, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x68,0xd3]
+ vcvttph2ibs zmm2, zmm3, {sae}
+
+// CHECK: vcvttph2ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x68,0xd3]
+ vcvttph2ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvttph2ibs zmm2 {k7} {z}, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x68,0xd3]
+ vcvttph2ibs zmm2 {k7} {z}, zmm3, {sae}
+
+// CHECK: vcvttph2ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0xd3]
+ vcvttph2ibs ymm2, ymm3
+
+// CHECK: vcvttph2ibs ymm2, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x68,0xd3]
+ vcvttph2ibs ymm2, ymm3, {sae}
+
+// CHECK: vcvttph2ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x68,0xd3]
+ vcvttph2ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvttph2ibs ymm2 {k7} {z}, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x68,0xd3]
+ vcvttph2ibs ymm2 {k7} {z}, ymm3, {sae}
+
+// CHECK: vcvttph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2ibs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x68,0x10]
+ vcvttph2ibs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvttph2ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x68,0x51,0x7f]
+ vcvttph2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x68,0x52,0x80]
+ vcvttph2ibs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvttph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2ibs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x68,0x10]
+ vcvttph2ibs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvttph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x68,0x51,0x7f]
+ vcvttph2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x68,0x52,0x80]
+ vcvttph2ibs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvttph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2ibs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x68,0x10]
+ vcvttph2ibs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvttph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x68,0x51,0x7f]
+ vcvttph2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x68,0x52,0x80]
+ vcvttph2ibs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvttph2iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0xd3]
+ vcvttph2iubs xmm2, xmm3
+
+// CHECK: vcvttph2iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6a,0xd3]
+ vcvttph2iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvttph2iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6a,0xd3]
+ vcvttph2iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttph2iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0xd3]
+ vcvttph2iubs zmm2, zmm3
+
+// CHECK: vcvttph2iubs zmm2, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6a,0xd3]
+ vcvttph2iubs zmm2, zmm3, {sae}
+
+// CHECK: vcvttph2iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6a,0xd3]
+ vcvttph2iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvttph2iubs zmm2 {k7} {z}, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6a,0xd3]
+ vcvttph2iubs zmm2 {k7} {z}, zmm3, {sae}
+
+// CHECK: vcvttph2iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0xd3]
+ vcvttph2iubs ymm2, ymm3
+
+// CHECK: vcvttph2iubs ymm2, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6a,0xd3]
+ vcvttph2iubs ymm2, ymm3, {sae}
+
+// CHECK: vcvttph2iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6a,0xd3]
+ vcvttph2iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvttph2iubs ymm2 {k7} {z}, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6a,0xd3]
+ vcvttph2iubs ymm2 {k7} {z}, ymm3, {sae}
+
+// CHECK: vcvttph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2iubs xmm2, word ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6a,0x10]
+ vcvttph2iubs xmm2, word ptr [eax]{1to8}
+
+// CHECK: vcvttph2iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6a,0x51,0x7f]
+ vcvttph2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6a,0x52,0x80]
+ vcvttph2iubs xmm2 {k7} {z}, word ptr [edx - 256]{1to8}
+
+// CHECK: vcvttph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2iubs ymm2, word ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6a,0x10]
+ vcvttph2iubs ymm2, word ptr [eax]{1to16}
+
+// CHECK: vcvttph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6a,0x51,0x7f]
+ vcvttph2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6a,0x52,0x80]
+ vcvttph2iubs ymm2 {k7} {z}, word ptr [edx - 256]{1to16}
+
+// CHECK: vcvttph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttph2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttph2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttph2iubs zmm2, word ptr [eax]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6a,0x10]
+ vcvttph2iubs zmm2, word ptr [eax]{1to32}
+
+// CHECK: vcvttph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6a,0x51,0x7f]
+ vcvttph2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6a,0x52,0x80]
+ vcvttph2iubs zmm2 {k7} {z}, word ptr [edx - 256]{1to32}
+
+// CHECK: vcvttps2ibs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0xd3]
+ vcvttps2ibs xmm2, xmm3
+
+// CHECK: vcvttps2ibs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x68,0xd3]
+ vcvttps2ibs xmm2 {k7}, xmm3
+
+// CHECK: vcvttps2ibs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x68,0xd3]
+ vcvttps2ibs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttps2ibs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0xd3]
+ vcvttps2ibs zmm2, zmm3
+
+// CHECK: vcvttps2ibs zmm2, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x68,0xd3]
+ vcvttps2ibs zmm2, zmm3, {sae}
+
+// CHECK: vcvttps2ibs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x68,0xd3]
+ vcvttps2ibs zmm2 {k7}, zmm3
+
+// CHECK: vcvttps2ibs zmm2 {k7} {z}, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x68,0xd3]
+ vcvttps2ibs zmm2 {k7} {z}, zmm3, {sae}
+
+// CHECK: vcvttps2ibs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0xd3]
+ vcvttps2ibs ymm2, ymm3
+
+// CHECK: vcvttps2ibs ymm2, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x68,0xd3]
+ vcvttps2ibs ymm2, ymm3, {sae}
+
+// CHECK: vcvttps2ibs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x68,0xd3]
+ vcvttps2ibs ymm2 {k7}, ymm3
+
+// CHECK: vcvttps2ibs ymm2 {k7} {z}, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x68,0xd3]
+ vcvttps2ibs ymm2 {k7} {z}, ymm3, {sae}
+
+// CHECK: vcvttps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2ibs xmm2, dword ptr [eax]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x68,0x10]
+ vcvttps2ibs xmm2, dword ptr [eax]{1to4}
+
+// CHECK: vcvttps2ibs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x68,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2ibs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x68,0x51,0x7f]
+ vcvttps2ibs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x68,0x52,0x80]
+ vcvttps2ibs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+
+// CHECK: vcvttps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2ibs ymm2, dword ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x68,0x10]
+ vcvttps2ibs ymm2, dword ptr [eax]{1to8}
+
+// CHECK: vcvttps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x68,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2ibs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x68,0x51,0x7f]
+ vcvttps2ibs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x68,0x52,0x80]
+ vcvttps2ibs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+
+// CHECK: vcvttps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2ibs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x68,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2ibs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2ibs zmm2, dword ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x68,0x10]
+ vcvttps2ibs zmm2, dword ptr [eax]{1to16}
+
+// CHECK: vcvttps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x68,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2ibs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x68,0x51,0x7f]
+ vcvttps2ibs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x68,0x52,0x80]
+ vcvttps2ibs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+
+// CHECK: vcvttps2iubs xmm2, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0xd3]
+ vcvttps2iubs xmm2, xmm3
+
+// CHECK: vcvttps2iubs xmm2 {k7}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6a,0xd3]
+ vcvttps2iubs xmm2 {k7}, xmm3
+
+// CHECK: vcvttps2iubs xmm2 {k7} {z}, xmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6a,0xd3]
+ vcvttps2iubs xmm2 {k7} {z}, xmm3
+
+// CHECK: vcvttps2iubs zmm2, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0xd3]
+ vcvttps2iubs zmm2, zmm3
+
+// CHECK: vcvttps2iubs zmm2, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6a,0xd3]
+ vcvttps2iubs zmm2, zmm3, {sae}
+
+// CHECK: vcvttps2iubs zmm2 {k7}, zmm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6a,0xd3]
+ vcvttps2iubs zmm2 {k7}, zmm3
+
+// CHECK: vcvttps2iubs zmm2 {k7} {z}, zmm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6a,0xd3]
+ vcvttps2iubs zmm2 {k7} {z}, zmm3, {sae}
+
+// CHECK: vcvttps2iubs ymm2, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0xd3]
+ vcvttps2iubs ymm2, ymm3
+
+// CHECK: vcvttps2iubs ymm2, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6a,0xd3]
+ vcvttps2iubs ymm2, ymm3, {sae}
+
+// CHECK: vcvttps2iubs ymm2 {k7}, ymm3
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6a,0xd3]
+ vcvttps2iubs ymm2 {k7}, ymm3
+
+// CHECK: vcvttps2iubs ymm2 {k7} {z}, ymm3, {sae}
+// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6a,0xd3]
+ vcvttps2iubs ymm2 {k7} {z}, ymm3, {sae}
+
+// CHECK: vcvttps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs xmm2, xmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2iubs xmm2, dword ptr [eax]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6a,0x10]
+ vcvttps2iubs xmm2, dword ptr [eax]{1to4}
+
+// CHECK: vcvttps2iubs xmm2, xmmword ptr [2*ebp - 512]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6a,0x14,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2iubs xmm2, xmmword ptr [2*ebp - 512]
+
+// CHECK: vcvttps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6a,0x51,0x7f]
+ vcvttps2iubs xmm2 {k7} {z}, xmmword ptr [ecx + 2032]
+
+// CHECK: vcvttps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6a,0x52,0x80]
+ vcvttps2iubs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4}
+
+// CHECK: vcvttps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs ymm2, ymmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2iubs ymm2, dword ptr [eax]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6a,0x10]
+ vcvttps2iubs ymm2, dword ptr [eax]{1to8}
+
+// CHECK: vcvttps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6a,0x14,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2iubs ymm2, ymmword ptr [2*ebp - 1024]
+
+// CHECK: vcvttps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6a,0x51,0x7f]
+ vcvttps2iubs ymm2 {k7} {z}, ymmword ptr [ecx + 4064]
+
+// CHECK: vcvttps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6a,0x52,0x80]
+ vcvttps2iubs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8}
+
+// CHECK: vcvttps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0x94,0xf4,0x00,0x00,0x00,0x10]
+ vcvttps2iubs zmm2, zmmword ptr [esp + 8*esi + 268435456]
+
+// CHECK: vcvttps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6a,0x94,0x87,0x23,0x01,0x00,0x00]
+ vcvttps2iubs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291]
+
+// CHECK: vcvttps2iubs zmm2, dword ptr [eax]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6a,0x10]
+ vcvttps2iubs zmm2, dword ptr [eax]{1to16}
+
+// CHECK: vcvttps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6a,0x14,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2iubs zmm2, zmmword ptr [2*ebp - 2048]
+
+// CHECK: vcvttps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6a,0x51,0x7f]
+ vcvttps2iubs zmm2 {k7} {z}, zmmword ptr [ecx + 8128]
+
+// CHECK: vcvttps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6a,0x52,0x80]
+ vcvttps2iubs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16}
+
diff --git a/llvm/test/MC/X86/avx10.2satcvt-64-att.s b/llvm/test/MC/X86/avx10.2satcvt-64-att.s
new file mode 100644
index 0000000..b6767b9
--- /dev/null
+++ b/llvm/test/MC/X86/avx10.2satcvt-64-att.s
@@ -0,0 +1,1362 @@
+// RUN: llvm-mc -triple x86_64 --show-encoding %s | FileCheck %s
+
+// CHECK: vcvtnebf162ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x69,0xf7]
+ vcvtnebf162ibs %xmm23, %xmm22
+
+// CHECK: vcvtnebf162ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x69,0xf7]
+ vcvtnebf162ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtnebf162ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x69,0xf7]
+ vcvtnebf162ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x69,0xf7]
+ vcvtnebf162ibs %zmm23, %zmm22
+
+// CHECK: vcvtnebf162ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x69,0xf7]
+ vcvtnebf162ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtnebf162ibs %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x69,0xf7]
+ vcvtnebf162ibs %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x69,0xf7]
+ vcvtnebf162ibs %ymm23, %ymm22
+
+// CHECK: vcvtnebf162ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x69,0xf7]
+ vcvtnebf162ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtnebf162ibs %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x69,0xf7]
+ vcvtnebf162ibs %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtnebf162ibs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs (%rip){1to8}, %xmm22
+
+// CHECK: vcvtnebf162ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x69,0x71,0x7f]
+ vcvtnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x69,0x72,0x80]
+ vcvtnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtnebf162ibs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs (%rip){1to16}, %ymm22
+
+// CHECK: vcvtnebf162ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x69,0x71,0x7f]
+ vcvtnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x69,0x72,0x80]
+ vcvtnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtnebf162ibs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs (%rip){1to32}, %zmm22
+
+// CHECK: vcvtnebf162ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x69,0x71,0x7f]
+ vcvtnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x69,0x72,0x80]
+ vcvtnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6b,0xf7]
+ vcvtnebf162iubs %xmm23, %xmm22
+
+// CHECK: vcvtnebf162iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x6b,0xf7]
+ vcvtnebf162iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtnebf162iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x6b,0xf7]
+ vcvtnebf162iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6b,0xf7]
+ vcvtnebf162iubs %zmm23, %zmm22
+
+// CHECK: vcvtnebf162iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x6b,0xf7]
+ vcvtnebf162iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtnebf162iubs %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x6b,0xf7]
+ vcvtnebf162iubs %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6b,0xf7]
+ vcvtnebf162iubs %ymm23, %ymm22
+
+// CHECK: vcvtnebf162iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x6b,0xf7]
+ vcvtnebf162iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtnebf162iubs %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x6b,0xf7]
+ vcvtnebf162iubs %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtnebf162iubs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs (%rip){1to8}, %xmm22
+
+// CHECK: vcvtnebf162iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x6b,0x71,0x7f]
+ vcvtnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x6b,0x72,0x80]
+ vcvtnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtnebf162iubs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs (%rip){1to16}, %ymm22
+
+// CHECK: vcvtnebf162iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x6b,0x71,0x7f]
+ vcvtnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x6b,0x72,0x80]
+ vcvtnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtnebf162iubs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs (%rip){1to32}, %zmm22
+
+// CHECK: vcvtnebf162iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x6b,0x71,0x7f]
+ vcvtnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x6b,0x72,0x80]
+ vcvtnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x69,0xf7]
+ vcvtph2ibs %xmm23, %xmm22
+
+// CHECK: vcvtph2ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x69,0xf7]
+ vcvtph2ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtph2ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x69,0xf7]
+ vcvtph2ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x69,0xf7]
+ vcvtph2ibs %zmm23, %zmm22
+
+// CHECK: vcvtph2ibs {rn-sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x69,0xf7]
+ vcvtph2ibs {rn-sae}, %zmm23, %zmm22
+
+// CHECK: vcvtph2ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x69,0xf7]
+ vcvtph2ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtph2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0xff,0x69,0xf7]
+ vcvtph2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x69,0xf7]
+ vcvtph2ibs %ymm23, %ymm22
+
+// CHECK: vcvtph2ibs {rn-sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x69,0xf7]
+ vcvtph2ibs {rn-sae}, %ymm23, %ymm22
+
+// CHECK: vcvtph2ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x69,0xf7]
+ vcvtph2ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtph2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0xff,0x69,0xf7]
+ vcvtph2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtph2ibs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs (%rip){1to8}, %xmm22
+
+// CHECK: vcvtph2ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x69,0x71,0x7f]
+ vcvtph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x69,0x72,0x80]
+ vcvtph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtph2ibs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs (%rip){1to16}, %ymm22
+
+// CHECK: vcvtph2ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x69,0x71,0x7f]
+ vcvtph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x69,0x72,0x80]
+ vcvtph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtph2ibs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs (%rip){1to32}, %zmm22
+
+// CHECK: vcvtph2ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x69,0x71,0x7f]
+ vcvtph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x69,0x72,0x80]
+ vcvtph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6b,0xf7]
+ vcvtph2iubs %xmm23, %xmm22
+
+// CHECK: vcvtph2iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6b,0xf7]
+ vcvtph2iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtph2iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6b,0xf7]
+ vcvtph2iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6b,0xf7]
+ vcvtph2iubs %zmm23, %zmm22
+
+// CHECK: vcvtph2iubs {rn-sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6b,0xf7]
+ vcvtph2iubs {rn-sae}, %zmm23, %zmm22
+
+// CHECK: vcvtph2iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6b,0xf7]
+ vcvtph2iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtph2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0xff,0x6b,0xf7]
+ vcvtph2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6b,0xf7]
+ vcvtph2iubs %ymm23, %ymm22
+
+// CHECK: vcvtph2iubs {rn-sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6b,0xf7]
+ vcvtph2iubs {rn-sae}, %ymm23, %ymm22
+
+// CHECK: vcvtph2iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6b,0xf7]
+ vcvtph2iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtph2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0xff,0x6b,0xf7]
+ vcvtph2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtph2iubs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs (%rip){1to8}, %xmm22
+
+// CHECK: vcvtph2iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6b,0x71,0x7f]
+ vcvtph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6b,0x72,0x80]
+ vcvtph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtph2iubs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs (%rip){1to16}, %ymm22
+
+// CHECK: vcvtph2iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6b,0x71,0x7f]
+ vcvtph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6b,0x72,0x80]
+ vcvtph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtph2iubs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs (%rip){1to32}, %zmm22
+
+// CHECK: vcvtph2iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6b,0x71,0x7f]
+ vcvtph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6b,0x72,0x80]
+ vcvtph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x69,0xf7]
+ vcvtps2ibs %xmm23, %xmm22
+
+// CHECK: vcvtps2ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x69,0xf7]
+ vcvtps2ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtps2ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x69,0xf7]
+ vcvtps2ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x69,0xf7]
+ vcvtps2ibs %zmm23, %zmm22
+
+// CHECK: vcvtps2ibs {rn-sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x69,0xf7]
+ vcvtps2ibs {rn-sae}, %zmm23, %zmm22
+
+// CHECK: vcvtps2ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x69,0xf7]
+ vcvtps2ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtps2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0xff,0x69,0xf7]
+ vcvtps2ibs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x69,0xf7]
+ vcvtps2ibs %ymm23, %ymm22
+
+// CHECK: vcvtps2ibs {rn-sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x69,0xf7]
+ vcvtps2ibs {rn-sae}, %ymm23, %ymm22
+
+// CHECK: vcvtps2ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x69,0xf7]
+ vcvtps2ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtps2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0xff,0x69,0xf7]
+ vcvtps2ibs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtps2ibs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs (%rip){1to4}, %xmm22
+
+// CHECK: vcvtps2ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x69,0x71,0x7f]
+ vcvtps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x69,0x72,0x80]
+ vcvtps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtps2ibs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs (%rip){1to8}, %ymm22
+
+// CHECK: vcvtps2ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x69,0x71,0x7f]
+ vcvtps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x69,0x72,0x80]
+ vcvtps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtps2ibs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs (%rip){1to16}, %zmm22
+
+// CHECK: vcvtps2ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x69,0x71,0x7f]
+ vcvtps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x69,0x72,0x80]
+ vcvtps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6b,0xf7]
+ vcvtps2iubs %xmm23, %xmm22
+
+// CHECK: vcvtps2iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6b,0xf7]
+ vcvtps2iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvtps2iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6b,0xf7]
+ vcvtps2iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6b,0xf7]
+ vcvtps2iubs %zmm23, %zmm22
+
+// CHECK: vcvtps2iubs {rn-sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6b,0xf7]
+ vcvtps2iubs {rn-sae}, %zmm23, %zmm22
+
+// CHECK: vcvtps2iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6b,0xf7]
+ vcvtps2iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvtps2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0xff,0x6b,0xf7]
+ vcvtps2iubs {rz-sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6b,0xf7]
+ vcvtps2iubs %ymm23, %ymm22
+
+// CHECK: vcvtps2iubs {rn-sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6b,0xf7]
+ vcvtps2iubs {rn-sae}, %ymm23, %ymm22
+
+// CHECK: vcvtps2iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6b,0xf7]
+ vcvtps2iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvtps2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0xff,0x6b,0xf7]
+ vcvtps2iubs {rz-sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvtps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvtps2iubs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs (%rip){1to4}, %xmm22
+
+// CHECK: vcvtps2iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvtps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6b,0x71,0x7f]
+ vcvtps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6b,0x72,0x80]
+ vcvtps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvtps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvtps2iubs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs (%rip){1to8}, %ymm22
+
+// CHECK: vcvtps2iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvtps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6b,0x71,0x7f]
+ vcvtps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6b,0x72,0x80]
+ vcvtps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvtps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvtps2iubs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs (%rip){1to16}, %zmm22
+
+// CHECK: vcvtps2iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvtps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6b,0x71,0x7f]
+ vcvtps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvtps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6b,0x72,0x80]
+ vcvtps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x68,0xf7]
+ vcvttnebf162ibs %xmm23, %xmm22
+
+// CHECK: vcvttnebf162ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x68,0xf7]
+ vcvttnebf162ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttnebf162ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x68,0xf7]
+ vcvttnebf162ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x68,0xf7]
+ vcvttnebf162ibs %zmm23, %zmm22
+
+// CHECK: vcvttnebf162ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x68,0xf7]
+ vcvttnebf162ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttnebf162ibs %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x68,0xf7]
+ vcvttnebf162ibs %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x68,0xf7]
+ vcvttnebf162ibs %ymm23, %ymm22
+
+// CHECK: vcvttnebf162ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x68,0xf7]
+ vcvttnebf162ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttnebf162ibs %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x68,0xf7]
+ vcvttnebf162ibs %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttnebf162ibs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs (%rip){1to8}, %xmm22
+
+// CHECK: vcvttnebf162ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x68,0x71,0x7f]
+ vcvttnebf162ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x68,0x72,0x80]
+ vcvttnebf162ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttnebf162ibs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs (%rip){1to16}, %ymm22
+
+// CHECK: vcvttnebf162ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x68,0x71,0x7f]
+ vcvttnebf162ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x68,0x72,0x80]
+ vcvttnebf162ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttnebf162ibs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs (%rip){1to32}, %zmm22
+
+// CHECK: vcvttnebf162ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x68,0x71,0x7f]
+ vcvttnebf162ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x68,0x72,0x80]
+ vcvttnebf162ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6a,0xf7]
+ vcvttnebf162iubs %xmm23, %xmm22
+
+// CHECK: vcvttnebf162iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x6a,0xf7]
+ vcvttnebf162iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttnebf162iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x6a,0xf7]
+ vcvttnebf162iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6a,0xf7]
+ vcvttnebf162iubs %zmm23, %zmm22
+
+// CHECK: vcvttnebf162iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x6a,0xf7]
+ vcvttnebf162iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttnebf162iubs %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x6a,0xf7]
+ vcvttnebf162iubs %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6a,0xf7]
+ vcvttnebf162iubs %ymm23, %ymm22
+
+// CHECK: vcvttnebf162iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x6a,0xf7]
+ vcvttnebf162iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttnebf162iubs %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x6a,0xf7]
+ vcvttnebf162iubs %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttnebf162iubs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs (%rip){1to8}, %xmm22
+
+// CHECK: vcvttnebf162iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x6a,0x71,0x7f]
+ vcvttnebf162iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x6a,0x72,0x80]
+ vcvttnebf162iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttnebf162iubs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs (%rip){1to16}, %ymm22
+
+// CHECK: vcvttnebf162iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x6a,0x71,0x7f]
+ vcvttnebf162iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x6a,0x72,0x80]
+ vcvttnebf162iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttnebf162iubs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs (%rip){1to32}, %zmm22
+
+// CHECK: vcvttnebf162iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x6a,0x71,0x7f]
+ vcvttnebf162iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x6a,0x72,0x80]
+ vcvttnebf162iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x68,0xf7]
+ vcvttph2ibs %xmm23, %xmm22
+
+// CHECK: vcvttph2ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x68,0xf7]
+ vcvttph2ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttph2ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x68,0xf7]
+ vcvttph2ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x68,0xf7]
+ vcvttph2ibs %zmm23, %zmm22
+
+// CHECK: vcvttph2ibs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x68,0xf7]
+ vcvttph2ibs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttph2ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x68,0xf7]
+ vcvttph2ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttph2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x68,0xf7]
+ vcvttph2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x68,0xf7]
+ vcvttph2ibs %ymm23, %ymm22
+
+// CHECK: vcvttph2ibs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x68,0xf7]
+ vcvttph2ibs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttph2ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x68,0xf7]
+ vcvttph2ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttph2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x68,0xf7]
+ vcvttph2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttph2ibs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs (%rip){1to8}, %xmm22
+
+// CHECK: vcvttph2ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x68,0x71,0x7f]
+ vcvttph2ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x68,0x72,0x80]
+ vcvttph2ibs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttph2ibs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs (%rip){1to16}, %ymm22
+
+// CHECK: vcvttph2ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x68,0x71,0x7f]
+ vcvttph2ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x68,0x72,0x80]
+ vcvttph2ibs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttph2ibs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs (%rip){1to32}, %zmm22
+
+// CHECK: vcvttph2ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x68,0x71,0x7f]
+ vcvttph2ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x68,0x72,0x80]
+ vcvttph2ibs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6a,0xf7]
+ vcvttph2iubs %xmm23, %xmm22
+
+// CHECK: vcvttph2iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6a,0xf7]
+ vcvttph2iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttph2iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6a,0xf7]
+ vcvttph2iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6a,0xf7]
+ vcvttph2iubs %zmm23, %zmm22
+
+// CHECK: vcvttph2iubs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6a,0xf7]
+ vcvttph2iubs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttph2iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6a,0xf7]
+ vcvttph2iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttph2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6a,0xf7]
+ vcvttph2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6a,0xf7]
+ vcvttph2iubs %ymm23, %ymm22
+
+// CHECK: vcvttph2iubs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6a,0xf7]
+ vcvttph2iubs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttph2iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6a,0xf7]
+ vcvttph2iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttph2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6a,0xf7]
+ vcvttph2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttph2iubs (%rip){1to8}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs (%rip){1to8}, %xmm22
+
+// CHECK: vcvttph2iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6a,0x71,0x7f]
+ vcvttph2iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6a,0x72,0x80]
+ vcvttph2iubs -256(%rdx){1to8}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttph2iubs (%rip){1to16}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs (%rip){1to16}, %ymm22
+
+// CHECK: vcvttph2iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6a,0x71,0x7f]
+ vcvttph2iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6a,0x72,0x80]
+ vcvttph2iubs -256(%rdx){1to16}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttph2iubs (%rip){1to32}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs (%rip){1to32}, %zmm22
+
+// CHECK: vcvttph2iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6a,0x71,0x7f]
+ vcvttph2iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6a,0x72,0x80]
+ vcvttph2iubs -256(%rdx){1to32}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x68,0xf7]
+ vcvttps2ibs %xmm23, %xmm22
+
+// CHECK: vcvttps2ibs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x68,0xf7]
+ vcvttps2ibs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2ibs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x68,0xf7]
+ vcvttps2ibs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x68,0xf7]
+ vcvttps2ibs %zmm23, %zmm22
+
+// CHECK: vcvttps2ibs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x68,0xf7]
+ vcvttps2ibs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttps2ibs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x68,0xf7]
+ vcvttps2ibs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x68,0xf7]
+ vcvttps2ibs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x68,0xf7]
+ vcvttps2ibs %ymm23, %ymm22
+
+// CHECK: vcvttps2ibs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x68,0xf7]
+ vcvttps2ibs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttps2ibs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x68,0xf7]
+ vcvttps2ibs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x68,0xf7]
+ vcvttps2ibs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2ibs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs (%rip){1to4}, %xmm22
+
+// CHECK: vcvttps2ibs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2ibs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x68,0x71,0x7f]
+ vcvttps2ibs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x68,0x72,0x80]
+ vcvttps2ibs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2ibs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs (%rip){1to8}, %ymm22
+
+// CHECK: vcvttps2ibs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2ibs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x68,0x71,0x7f]
+ vcvttps2ibs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x68,0x72,0x80]
+ vcvttps2ibs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttps2ibs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs (%rip){1to16}, %zmm22
+
+// CHECK: vcvttps2ibs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2ibs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x68,0x71,0x7f]
+ vcvttps2ibs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x68,0x72,0x80]
+ vcvttps2ibs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6a,0xf7]
+ vcvttps2iubs %xmm23, %xmm22
+
+// CHECK: vcvttps2iubs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6a,0xf7]
+ vcvttps2iubs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2iubs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6a,0xf7]
+ vcvttps2iubs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6a,0xf7]
+ vcvttps2iubs %zmm23, %zmm22
+
+// CHECK: vcvttps2iubs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6a,0xf7]
+ vcvttps2iubs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttps2iubs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6a,0xf7]
+ vcvttps2iubs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6a,0xf7]
+ vcvttps2iubs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6a,0xf7]
+ vcvttps2iubs %ymm23, %ymm22
+
+// CHECK: vcvttps2iubs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6a,0xf7]
+ vcvttps2iubs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttps2iubs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6a,0xf7]
+ vcvttps2iubs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6a,0xf7]
+ vcvttps2iubs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2iubs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs (%rip){1to4}, %xmm22
+
+// CHECK: vcvttps2iubs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2iubs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6a,0x71,0x7f]
+ vcvttps2iubs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6a,0x72,0x80]
+ vcvttps2iubs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2iubs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs (%rip){1to8}, %ymm22
+
+// CHECK: vcvttps2iubs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2iubs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6a,0x71,0x7f]
+ vcvttps2iubs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6a,0x72,0x80]
+ vcvttps2iubs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttps2iubs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs (%rip){1to16}, %zmm22
+
+// CHECK: vcvttps2iubs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2iubs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6a,0x71,0x7f]
+ vcvttps2iubs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6a,0x72,0x80]
+ vcvttps2iubs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
diff --git a/llvm/test/MC/X86/avx10.2satcvt-64-intel.s b/llvm/test/MC/X86/avx10.2satcvt-64-intel.s
new file mode 100644
index 0000000..e1df9dc
--- /dev/null
+++ b/llvm/test/MC/X86/avx10.2satcvt-64-intel.s
@@ -0,0 +1,1362 @@
+// RUN: llvm-mc -triple x86_64 -x86-asm-syntax=intel -output-asm-variant=1 --show-encoding %s | FileCheck %s
+
+// CHECK: vcvtnebf162ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x69,0xf7]
+ vcvtnebf162ibs xmm22, xmm23
+
+// CHECK: vcvtnebf162ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x69,0xf7]
+ vcvtnebf162ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvtnebf162ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x69,0xf7]
+ vcvtnebf162ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtnebf162ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x69,0xf7]
+ vcvtnebf162ibs zmm22, zmm23
+
+// CHECK: vcvtnebf162ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x69,0xf7]
+ vcvtnebf162ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvtnebf162ibs zmm22 {k7} {z}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x69,0xf7]
+ vcvtnebf162ibs zmm22 {k7} {z}, zmm23
+
+// CHECK: vcvtnebf162ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x69,0xf7]
+ vcvtnebf162ibs ymm22, ymm23
+
+// CHECK: vcvtnebf162ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x69,0xf7]
+ vcvtnebf162ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvtnebf162ibs ymm22 {k7} {z}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x69,0xf7]
+ vcvtnebf162ibs ymm22 {k7} {z}, ymm23
+
+// CHECK: vcvtnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162ibs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvtnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x69,0x71,0x7f]
+ vcvtnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x69,0x72,0x80]
+ vcvtnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvtnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162ibs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvtnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x69,0x71,0x7f]
+ vcvtnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x69,0x72,0x80]
+ vcvtnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvtnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162ibs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162ibs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvtnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x69,0x71,0x7f]
+ vcvtnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x69,0x72,0x80]
+ vcvtnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvtnebf162iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6b,0xf7]
+ vcvtnebf162iubs xmm22, xmm23
+
+// CHECK: vcvtnebf162iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x6b,0xf7]
+ vcvtnebf162iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvtnebf162iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x6b,0xf7]
+ vcvtnebf162iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtnebf162iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6b,0xf7]
+ vcvtnebf162iubs zmm22, zmm23
+
+// CHECK: vcvtnebf162iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x6b,0xf7]
+ vcvtnebf162iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvtnebf162iubs zmm22 {k7} {z}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x6b,0xf7]
+ vcvtnebf162iubs zmm22 {k7} {z}, zmm23
+
+// CHECK: vcvtnebf162iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6b,0xf7]
+ vcvtnebf162iubs ymm22, ymm23
+
+// CHECK: vcvtnebf162iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x6b,0xf7]
+ vcvtnebf162iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvtnebf162iubs ymm22 {k7} {z}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x6b,0xf7]
+ vcvtnebf162iubs ymm22 {k7} {z}, ymm23
+
+// CHECK: vcvtnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162iubs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvtnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x6b,0x71,0x7f]
+ vcvtnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x6b,0x72,0x80]
+ vcvtnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvtnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162iubs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvtnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x6b,0x71,0x7f]
+ vcvtnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x6b,0x72,0x80]
+ vcvtnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvtnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtnebf162iubs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtnebf162iubs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvtnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x6b,0x71,0x7f]
+ vcvtnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x6b,0x72,0x80]
+ vcvtnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvtph2ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x69,0xf7]
+ vcvtph2ibs xmm22, xmm23
+
+// CHECK: vcvtph2ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x69,0xf7]
+ vcvtph2ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvtph2ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x69,0xf7]
+ vcvtph2ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtph2ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x69,0xf7]
+ vcvtph2ibs zmm22, zmm23
+
+// CHECK: vcvtph2ibs zmm22, zmm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x69,0xf7]
+ vcvtph2ibs zmm22, zmm23, {rn-sae}
+
+// CHECK: vcvtph2ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x69,0xf7]
+ vcvtph2ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvtph2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0xff,0x69,0xf7]
+ vcvtph2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+
+// CHECK: vcvtph2ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x69,0xf7]
+ vcvtph2ibs ymm22, ymm23
+
+// CHECK: vcvtph2ibs ymm22, ymm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x69,0xf7]
+ vcvtph2ibs ymm22, ymm23, {rn-sae}
+
+// CHECK: vcvtph2ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x69,0xf7]
+ vcvtph2ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvtph2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0xff,0x69,0xf7]
+ vcvtph2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+
+// CHECK: vcvtph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2ibs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvtph2ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x69,0x71,0x7f]
+ vcvtph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x69,0x72,0x80]
+ vcvtph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvtph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2ibs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvtph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x69,0x71,0x7f]
+ vcvtph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x69,0x72,0x80]
+ vcvtph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvtph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2ibs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2ibs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvtph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x69,0x71,0x7f]
+ vcvtph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x69,0x72,0x80]
+ vcvtph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvtph2iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6b,0xf7]
+ vcvtph2iubs xmm22, xmm23
+
+// CHECK: vcvtph2iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6b,0xf7]
+ vcvtph2iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvtph2iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6b,0xf7]
+ vcvtph2iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtph2iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6b,0xf7]
+ vcvtph2iubs zmm22, zmm23
+
+// CHECK: vcvtph2iubs zmm22, zmm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6b,0xf7]
+ vcvtph2iubs zmm22, zmm23, {rn-sae}
+
+// CHECK: vcvtph2iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6b,0xf7]
+ vcvtph2iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvtph2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0xff,0x6b,0xf7]
+ vcvtph2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+
+// CHECK: vcvtph2iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6b,0xf7]
+ vcvtph2iubs ymm22, ymm23
+
+// CHECK: vcvtph2iubs ymm22, ymm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6b,0xf7]
+ vcvtph2iubs ymm22, ymm23, {rn-sae}
+
+// CHECK: vcvtph2iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6b,0xf7]
+ vcvtph2iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvtph2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0xff,0x6b,0xf7]
+ vcvtph2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+
+// CHECK: vcvtph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2iubs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvtph2iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtph2iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6b,0x71,0x7f]
+ vcvtph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6b,0x72,0x80]
+ vcvtph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvtph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2iubs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvtph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6b,0x71,0x7f]
+ vcvtph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6b,0x72,0x80]
+ vcvtph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvtph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtph2iubs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtph2iubs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvtph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6b,0x71,0x7f]
+ vcvtph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6b,0x72,0x80]
+ vcvtph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvtps2ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x69,0xf7]
+ vcvtps2ibs xmm22, xmm23
+
+// CHECK: vcvtps2ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x69,0xf7]
+ vcvtps2ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvtps2ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x69,0xf7]
+ vcvtps2ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtps2ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x69,0xf7]
+ vcvtps2ibs zmm22, zmm23
+
+// CHECK: vcvtps2ibs zmm22, zmm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x69,0xf7]
+ vcvtps2ibs zmm22, zmm23, {rn-sae}
+
+// CHECK: vcvtps2ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x69,0xf7]
+ vcvtps2ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvtps2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0xff,0x69,0xf7]
+ vcvtps2ibs zmm22 {k7} {z}, zmm23, {rz-sae}
+
+// CHECK: vcvtps2ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x69,0xf7]
+ vcvtps2ibs ymm22, ymm23
+
+// CHECK: vcvtps2ibs ymm22, ymm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x69,0xf7]
+ vcvtps2ibs ymm22, ymm23, {rn-sae}
+
+// CHECK: vcvtps2ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x69,0xf7]
+ vcvtps2ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvtps2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0xff,0x69,0xf7]
+ vcvtps2ibs ymm22 {k7} {z}, ymm23, {rz-sae}
+
+// CHECK: vcvtps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2ibs xmm22, dword ptr [rip]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs xmm22, dword ptr [rip]{1to4}
+
+// CHECK: vcvtps2ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x69,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x69,0x71,0x7f]
+ vcvtps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x69,0x72,0x80]
+ vcvtps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvtps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2ibs ymm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs ymm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvtps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x69,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x69,0x71,0x7f]
+ vcvtps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x69,0x72,0x80]
+ vcvtps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
+// CHECK: vcvtps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x69,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x69,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2ibs zmm22, dword ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x69,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2ibs zmm22, dword ptr [rip]{1to16}
+
+// CHECK: vcvtps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x69,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x69,0x71,0x7f]
+ vcvtps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x69,0x72,0x80]
+ vcvtps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+
+// CHECK: vcvtps2iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6b,0xf7]
+ vcvtps2iubs xmm22, xmm23
+
+// CHECK: vcvtps2iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6b,0xf7]
+ vcvtps2iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvtps2iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6b,0xf7]
+ vcvtps2iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvtps2iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6b,0xf7]
+ vcvtps2iubs zmm22, zmm23
+
+// CHECK: vcvtps2iubs zmm22, zmm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6b,0xf7]
+ vcvtps2iubs zmm22, zmm23, {rn-sae}
+
+// CHECK: vcvtps2iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6b,0xf7]
+ vcvtps2iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvtps2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0xff,0x6b,0xf7]
+ vcvtps2iubs zmm22 {k7} {z}, zmm23, {rz-sae}
+
+// CHECK: vcvtps2iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6b,0xf7]
+ vcvtps2iubs ymm22, ymm23
+
+// CHECK: vcvtps2iubs ymm22, ymm23, {rn-sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6b,0xf7]
+ vcvtps2iubs ymm22, ymm23, {rn-sae}
+
+// CHECK: vcvtps2iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6b,0xf7]
+ vcvtps2iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvtps2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0xff,0x6b,0xf7]
+ vcvtps2iubs ymm22 {k7} {z}, ymm23, {rz-sae}
+
+// CHECK: vcvtps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2iubs xmm22, dword ptr [rip]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs xmm22, dword ptr [rip]{1to4}
+
+// CHECK: vcvtps2iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6b,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvtps2iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvtps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6b,0x71,0x7f]
+ vcvtps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvtps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6b,0x72,0x80]
+ vcvtps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvtps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2iubs ymm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs ymm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvtps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6b,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvtps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvtps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6b,0x71,0x7f]
+ vcvtps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvtps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6b,0x72,0x80]
+ vcvtps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
+// CHECK: vcvtps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6b,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvtps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvtps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6b,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvtps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvtps2iubs zmm22, dword ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6b,0x35,0x00,0x00,0x00,0x00]
+ vcvtps2iubs zmm22, dword ptr [rip]{1to16}
+
+// CHECK: vcvtps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6b,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvtps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvtps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6b,0x71,0x7f]
+ vcvtps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvtps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6b,0x72,0x80]
+ vcvtps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+
+// CHECK: vcvttnebf162ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x68,0xf7]
+ vcvttnebf162ibs xmm22, xmm23
+
+// CHECK: vcvttnebf162ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x68,0xf7]
+ vcvttnebf162ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvttnebf162ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x68,0xf7]
+ vcvttnebf162ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttnebf162ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x68,0xf7]
+ vcvttnebf162ibs zmm22, zmm23
+
+// CHECK: vcvttnebf162ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x68,0xf7]
+ vcvttnebf162ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvttnebf162ibs zmm22 {k7} {z}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x68,0xf7]
+ vcvttnebf162ibs zmm22 {k7} {z}, zmm23
+
+// CHECK: vcvttnebf162ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x68,0xf7]
+ vcvttnebf162ibs ymm22, ymm23
+
+// CHECK: vcvttnebf162ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x68,0xf7]
+ vcvttnebf162ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvttnebf162ibs ymm22 {k7} {z}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x68,0xf7]
+ vcvttnebf162ibs ymm22 {k7} {z}, ymm23
+
+// CHECK: vcvttnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162ibs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvttnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x68,0x71,0x7f]
+ vcvttnebf162ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x68,0x72,0x80]
+ vcvttnebf162ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvttnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162ibs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvttnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x68,0x71,0x7f]
+ vcvttnebf162ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x68,0x72,0x80]
+ vcvttnebf162ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvttnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162ibs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162ibs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvttnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x68,0x71,0x7f]
+ vcvttnebf162ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x68,0x72,0x80]
+ vcvttnebf162ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvttnebf162iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6a,0xf7]
+ vcvttnebf162iubs xmm22, xmm23
+
+// CHECK: vcvttnebf162iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x0f,0x6a,0xf7]
+ vcvttnebf162iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvttnebf162iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x8f,0x6a,0xf7]
+ vcvttnebf162iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttnebf162iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6a,0xf7]
+ vcvttnebf162iubs zmm22, zmm23
+
+// CHECK: vcvttnebf162iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x4f,0x6a,0xf7]
+ vcvttnebf162iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvttnebf162iubs zmm22 {k7} {z}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xcf,0x6a,0xf7]
+ vcvttnebf162iubs zmm22 {k7} {z}, zmm23
+
+// CHECK: vcvttnebf162iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6a,0xf7]
+ vcvttnebf162iubs ymm22, ymm23
+
+// CHECK: vcvttnebf162iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0x2f,0x6a,0xf7]
+ vcvttnebf162iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvttnebf162iubs ymm22 {k7} {z}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7f,0xaf,0x6a,0xf7]
+ vcvttnebf162iubs ymm22 {k7} {z}, ymm23
+
+// CHECK: vcvttnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162iubs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvttnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttnebf162iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x8f,0x6a,0x71,0x7f]
+ vcvttnebf162iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x9f,0x6a,0x72,0x80]
+ vcvttnebf162iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvttnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162iubs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvttnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttnebf162iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xaf,0x6a,0x71,0x7f]
+ vcvttnebf162iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xbf,0x6a,0x72,0x80]
+ vcvttnebf162iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvttnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7f,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttnebf162iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7f,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttnebf162iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttnebf162iubs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttnebf162iubs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvttnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7f,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttnebf162iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7f,0xcf,0x6a,0x71,0x7f]
+ vcvttnebf162iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7f,0xdf,0x6a,0x72,0x80]
+ vcvttnebf162iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvttph2ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x68,0xf7]
+ vcvttph2ibs xmm22, xmm23
+
+// CHECK: vcvttph2ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x68,0xf7]
+ vcvttph2ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvttph2ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x68,0xf7]
+ vcvttph2ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttph2ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x68,0xf7]
+ vcvttph2ibs zmm22, zmm23
+
+// CHECK: vcvttph2ibs zmm22, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x68,0xf7]
+ vcvttph2ibs zmm22, zmm23, {sae}
+
+// CHECK: vcvttph2ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x68,0xf7]
+ vcvttph2ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvttph2ibs zmm22 {k7} {z}, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x68,0xf7]
+ vcvttph2ibs zmm22 {k7} {z}, zmm23, {sae}
+
+// CHECK: vcvttph2ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x68,0xf7]
+ vcvttph2ibs ymm22, ymm23
+
+// CHECK: vcvttph2ibs ymm22, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x68,0xf7]
+ vcvttph2ibs ymm22, ymm23, {sae}
+
+// CHECK: vcvttph2ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x68,0xf7]
+ vcvttph2ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvttph2ibs ymm22 {k7} {z}, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x68,0xf7]
+ vcvttph2ibs ymm22 {k7} {z}, ymm23, {sae}
+
+// CHECK: vcvttph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2ibs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvttph2ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x68,0x71,0x7f]
+ vcvttph2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x68,0x72,0x80]
+ vcvttph2ibs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvttph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2ibs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvttph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x68,0x71,0x7f]
+ vcvttph2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x68,0x72,0x80]
+ vcvttph2ibs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvttph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2ibs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2ibs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvttph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x68,0x71,0x7f]
+ vcvttph2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x68,0x72,0x80]
+ vcvttph2ibs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvttph2iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6a,0xf7]
+ vcvttph2iubs xmm22, xmm23
+
+// CHECK: vcvttph2iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6a,0xf7]
+ vcvttph2iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvttph2iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6a,0xf7]
+ vcvttph2iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttph2iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6a,0xf7]
+ vcvttph2iubs zmm22, zmm23
+
+// CHECK: vcvttph2iubs zmm22, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6a,0xf7]
+ vcvttph2iubs zmm22, zmm23, {sae}
+
+// CHECK: vcvttph2iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6a,0xf7]
+ vcvttph2iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvttph2iubs zmm22 {k7} {z}, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6a,0xf7]
+ vcvttph2iubs zmm22 {k7} {z}, zmm23, {sae}
+
+// CHECK: vcvttph2iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6a,0xf7]
+ vcvttph2iubs ymm22, ymm23
+
+// CHECK: vcvttph2iubs ymm22, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6a,0xf7]
+ vcvttph2iubs ymm22, ymm23, {sae}
+
+// CHECK: vcvttph2iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6a,0xf7]
+ vcvttph2iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvttph2iubs ymm22 {k7} {z}, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6a,0xf7]
+ vcvttph2iubs ymm22 {k7} {z}, ymm23, {sae}
+
+// CHECK: vcvttph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2iubs xmm22, word ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs xmm22, word ptr [rip]{1to8}
+
+// CHECK: vcvttph2iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttph2iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6a,0x71,0x7f]
+ vcvttph2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6a,0x72,0x80]
+ vcvttph2iubs xmm22 {k7} {z}, word ptr [rdx - 256]{1to8}
+
+// CHECK: vcvttph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2iubs ymm22, word ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs ymm22, word ptr [rip]{1to16}
+
+// CHECK: vcvttph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttph2iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6a,0x71,0x7f]
+ vcvttph2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6a,0x72,0x80]
+ vcvttph2iubs ymm22 {k7} {z}, word ptr [rdx - 256]{1to16}
+
+// CHECK: vcvttph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttph2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttph2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttph2iubs zmm22, word ptr [rip]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttph2iubs zmm22, word ptr [rip]{1to32}
+
+// CHECK: vcvttph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttph2iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6a,0x71,0x7f]
+ vcvttph2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6a,0x72,0x80]
+ vcvttph2iubs zmm22 {k7} {z}, word ptr [rdx - 256]{1to32}
+
+// CHECK: vcvttps2ibs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x68,0xf7]
+ vcvttps2ibs xmm22, xmm23
+
+// CHECK: vcvttps2ibs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x68,0xf7]
+ vcvttps2ibs xmm22 {k7}, xmm23
+
+// CHECK: vcvttps2ibs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x68,0xf7]
+ vcvttps2ibs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttps2ibs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x68,0xf7]
+ vcvttps2ibs zmm22, zmm23
+
+// CHECK: vcvttps2ibs zmm22, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x68,0xf7]
+ vcvttps2ibs zmm22, zmm23, {sae}
+
+// CHECK: vcvttps2ibs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x68,0xf7]
+ vcvttps2ibs zmm22 {k7}, zmm23
+
+// CHECK: vcvttps2ibs zmm22 {k7} {z}, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x68,0xf7]
+ vcvttps2ibs zmm22 {k7} {z}, zmm23, {sae}
+
+// CHECK: vcvttps2ibs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x68,0xf7]
+ vcvttps2ibs ymm22, ymm23
+
+// CHECK: vcvttps2ibs ymm22, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x68,0xf7]
+ vcvttps2ibs ymm22, ymm23, {sae}
+
+// CHECK: vcvttps2ibs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x68,0xf7]
+ vcvttps2ibs ymm22 {k7}, ymm23
+
+// CHECK: vcvttps2ibs ymm22 {k7} {z}, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x68,0xf7]
+ vcvttps2ibs ymm22 {k7} {z}, ymm23, {sae}
+
+// CHECK: vcvttps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2ibs xmm22, dword ptr [rip]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs xmm22, dword ptr [rip]{1to4}
+
+// CHECK: vcvttps2ibs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x68,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2ibs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x68,0x71,0x7f]
+ vcvttps2ibs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x68,0x72,0x80]
+ vcvttps2ibs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvttps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2ibs ymm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs ymm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvttps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x68,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2ibs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x68,0x71,0x7f]
+ vcvttps2ibs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x68,0x72,0x80]
+ vcvttps2ibs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
+// CHECK: vcvttps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x68,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2ibs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x68,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2ibs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2ibs zmm22, dword ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x68,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2ibs zmm22, dword ptr [rip]{1to16}
+
+// CHECK: vcvttps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x68,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2ibs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x68,0x71,0x7f]
+ vcvttps2ibs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x68,0x72,0x80]
+ vcvttps2ibs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+
+// CHECK: vcvttps2iubs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6a,0xf7]
+ vcvttps2iubs xmm22, xmm23
+
+// CHECK: vcvttps2iubs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6a,0xf7]
+ vcvttps2iubs xmm22 {k7}, xmm23
+
+// CHECK: vcvttps2iubs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6a,0xf7]
+ vcvttps2iubs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttps2iubs zmm22, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6a,0xf7]
+ vcvttps2iubs zmm22, zmm23
+
+// CHECK: vcvttps2iubs zmm22, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6a,0xf7]
+ vcvttps2iubs zmm22, zmm23, {sae}
+
+// CHECK: vcvttps2iubs zmm22 {k7}, zmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6a,0xf7]
+ vcvttps2iubs zmm22 {k7}, zmm23
+
+// CHECK: vcvttps2iubs zmm22 {k7} {z}, zmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6a,0xf7]
+ vcvttps2iubs zmm22 {k7} {z}, zmm23, {sae}
+
+// CHECK: vcvttps2iubs ymm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6a,0xf7]
+ vcvttps2iubs ymm22, ymm23
+
+// CHECK: vcvttps2iubs ymm22, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6a,0xf7]
+ vcvttps2iubs ymm22, ymm23, {sae}
+
+// CHECK: vcvttps2iubs ymm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6a,0xf7]
+ vcvttps2iubs ymm22 {k7}, ymm23
+
+// CHECK: vcvttps2iubs ymm22 {k7} {z}, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6a,0xf7]
+ vcvttps2iubs ymm22 {k7} {z}, ymm23, {sae}
+
+// CHECK: vcvttps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs xmm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2iubs xmm22, dword ptr [rip]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs xmm22, dword ptr [rip]{1to4}
+
+// CHECK: vcvttps2iubs xmm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6a,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2iubs xmm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6a,0x71,0x7f]
+ vcvttps2iubs xmm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6a,0x72,0x80]
+ vcvttps2iubs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvttps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2iubs ymm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs ymm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvttps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6a,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2iubs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6a,0x71,0x7f]
+ vcvttps2iubs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6a,0x72,0x80]
+ vcvttps2iubs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
+// CHECK: vcvttps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6a,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2iubs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6a,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2iubs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2iubs zmm22, dword ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6a,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2iubs zmm22, dword ptr [rip]{1to16}
+
+// CHECK: vcvttps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6a,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2iubs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6a,0x71,0x7f]
+ vcvttps2iubs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6a,0x72,0x80]
+ vcvttps2iubs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+
diff --git a/llvm/test/TableGen/SubtargetFeatureUniqueNames.td b/llvm/test/TableGen/SubtargetFeatureUniqueNames.td
new file mode 100644
index 0000000..a322d25
--- /dev/null
+++ b/llvm/test/TableGen/SubtargetFeatureUniqueNames.td
@@ -0,0 +1,15 @@
+// Temporarily disable test due to non-deterministic order of error messages.
+// UNSUPPORTED: target={{.*}}
+
+// RUN: not llvm-tblgen -gen-subtarget -I %p/../../include %s 2>&1 | FileCheck %s -DFILE=%s
+// Verify that subtarget features with same names result in an error.
+
+include "llvm/Target/Target.td"
+
+def MyTarget : Target;
+
+def FeatureA : SubtargetFeature<"NameA", "", "", "">;
+
+// CHECK: [[FILE]]:[[@LINE+2]]:5: error: Feature `NameA` already defined.
+// CHECK: [[FILE]]:[[@LINE-3]]:5: note: Previous definition here.
+def FeatureB : SubtargetFeature<"NameA", "", "", "">;
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 3b7caef..523db92 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -1189,6 +1189,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTDQ2PSZ256rr, X86::VCVTDQ2PSZ256rm, 0},
{X86::VCVTDQ2PSZrr, X86::VCVTDQ2PSZrm, 0},
{X86::VCVTDQ2PSrr, X86::VCVTDQ2PSrm, 0},
+ {X86::VCVTNEBF162IBSZ128rr, X86::VCVTNEBF162IBSZ128rm, 0},
+ {X86::VCVTNEBF162IBSZ256rr, X86::VCVTNEBF162IBSZ256rm, 0},
+ {X86::VCVTNEBF162IBSZrr, X86::VCVTNEBF162IBSZrm, 0},
+ {X86::VCVTNEBF162IUBSZ128rr, X86::VCVTNEBF162IUBSZ128rm, 0},
+ {X86::VCVTNEBF162IUBSZ256rr, X86::VCVTNEBF162IUBSZ256rm, 0},
+ {X86::VCVTNEBF162IUBSZrr, X86::VCVTNEBF162IUBSZrm, 0},
{X86::VCVTNEPS2BF16Yrr, X86::VCVTNEPS2BF16Yrm, 0},
{X86::VCVTNEPS2BF16Z128rr, X86::VCVTNEPS2BF16Z128rm, 0},
{X86::VCVTNEPS2BF16Z256rr, X86::VCVTNEPS2BF16Z256rm, 0},
@@ -1219,6 +1225,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTPH2DQZ128rr, X86::VCVTPH2DQZ128rm, TB_NO_REVERSE},
{X86::VCVTPH2DQZ256rr, X86::VCVTPH2DQZ256rm, 0},
{X86::VCVTPH2DQZrr, X86::VCVTPH2DQZrm, 0},
+ {X86::VCVTPH2IBSZ128rr, X86::VCVTPH2IBSZ128rm, 0},
+ {X86::VCVTPH2IBSZ256rr, X86::VCVTPH2IBSZ256rm, 0},
+ {X86::VCVTPH2IBSZrr, X86::VCVTPH2IBSZrm, 0},
+ {X86::VCVTPH2IUBSZ128rr, X86::VCVTPH2IUBSZ128rm, 0},
+ {X86::VCVTPH2IUBSZ256rr, X86::VCVTPH2IUBSZ256rm, 0},
+ {X86::VCVTPH2IUBSZrr, X86::VCVTPH2IUBSZrm, 0},
{X86::VCVTPH2PDZ128rr, X86::VCVTPH2PDZ128rm, TB_NO_REVERSE},
{X86::VCVTPH2PDZ256rr, X86::VCVTPH2PDZ256rm, TB_NO_REVERSE},
{X86::VCVTPH2PDZrr, X86::VCVTPH2PDZrm, 0},
@@ -1250,6 +1262,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTPS2DQZ256rr, X86::VCVTPS2DQZ256rm, 0},
{X86::VCVTPS2DQZrr, X86::VCVTPS2DQZrm, 0},
{X86::VCVTPS2DQrr, X86::VCVTPS2DQrm, 0},
+ {X86::VCVTPS2IBSZ128rr, X86::VCVTPS2IBSZ128rm, 0},
+ {X86::VCVTPS2IBSZ256rr, X86::VCVTPS2IBSZ256rm, 0},
+ {X86::VCVTPS2IBSZrr, X86::VCVTPS2IBSZrm, 0},
+ {X86::VCVTPS2IUBSZ128rr, X86::VCVTPS2IUBSZ128rm, 0},
+ {X86::VCVTPS2IUBSZ256rr, X86::VCVTPS2IUBSZ256rm, 0},
+ {X86::VCVTPS2IUBSZrr, X86::VCVTPS2IUBSZrm, 0},
{X86::VCVTPS2PDYrr, X86::VCVTPS2PDYrm, 0},
{X86::VCVTPS2PDZ128rr, X86::VCVTPS2PDZ128rm, TB_NO_REVERSE},
{X86::VCVTPS2PDZ256rr, X86::VCVTPS2PDZ256rm, 0},
@@ -1300,6 +1318,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTSS2SIrr_Int, X86::VCVTSS2SIrm_Int, TB_NO_REVERSE},
{X86::VCVTSS2USI64Zrr_Int, X86::VCVTSS2USI64Zrm_Int, TB_NO_REVERSE},
{X86::VCVTSS2USIZrr_Int, X86::VCVTSS2USIZrm_Int, TB_NO_REVERSE},
+ {X86::VCVTTNEBF162IBSZ128rr, X86::VCVTTNEBF162IBSZ128rm, 0},
+ {X86::VCVTTNEBF162IBSZ256rr, X86::VCVTTNEBF162IBSZ256rm, 0},
+ {X86::VCVTTNEBF162IBSZrr, X86::VCVTTNEBF162IBSZrm, 0},
+ {X86::VCVTTNEBF162IUBSZ128rr, X86::VCVTTNEBF162IUBSZ128rm, 0},
+ {X86::VCVTTNEBF162IUBSZ256rr, X86::VCVTTNEBF162IUBSZ256rm, 0},
+ {X86::VCVTTNEBF162IUBSZrr, X86::VCVTTNEBF162IUBSZrm, 0},
{X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0},
{X86::VCVTTPD2DQZ128rr, X86::VCVTTPD2DQZ128rm, 0},
{X86::VCVTTPD2DQZ256rr, X86::VCVTTPD2DQZ256rm, 0},
@@ -1317,6 +1341,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTTPH2DQZ128rr, X86::VCVTTPH2DQZ128rm, TB_NO_REVERSE},
{X86::VCVTTPH2DQZ256rr, X86::VCVTTPH2DQZ256rm, 0},
{X86::VCVTTPH2DQZrr, X86::VCVTTPH2DQZrm, 0},
+ {X86::VCVTTPH2IBSZ128rr, X86::VCVTTPH2IBSZ128rm, 0},
+ {X86::VCVTTPH2IBSZ256rr, X86::VCVTTPH2IBSZ256rm, 0},
+ {X86::VCVTTPH2IBSZrr, X86::VCVTTPH2IBSZrm, 0},
+ {X86::VCVTTPH2IUBSZ128rr, X86::VCVTTPH2IUBSZ128rm, 0},
+ {X86::VCVTTPH2IUBSZ256rr, X86::VCVTTPH2IUBSZ256rm, 0},
+ {X86::VCVTTPH2IUBSZrr, X86::VCVTTPH2IUBSZrm, 0},
{X86::VCVTTPH2QQZ128rr, X86::VCVTTPH2QQZ128rm, TB_NO_REVERSE},
{X86::VCVTTPH2QQZ256rr, X86::VCVTTPH2QQZ256rm, TB_NO_REVERSE},
{X86::VCVTTPH2QQZrr, X86::VCVTTPH2QQZrm, 0},
@@ -1337,6 +1367,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::VCVTTPS2DQZ256rr, X86::VCVTTPS2DQZ256rm, 0},
{X86::VCVTTPS2DQZrr, X86::VCVTTPS2DQZrm, 0},
{X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, 0},
+ {X86::VCVTTPS2IBSZ128rr, X86::VCVTTPS2IBSZ128rm, 0},
+ {X86::VCVTTPS2IBSZ256rr, X86::VCVTTPS2IBSZ256rm, 0},
+ {X86::VCVTTPS2IBSZrr, X86::VCVTTPS2IBSZrm, 0},
+ {X86::VCVTTPS2IUBSZ128rr, X86::VCVTTPS2IUBSZ128rm, 0},
+ {X86::VCVTTPS2IUBSZ256rr, X86::VCVTTPS2IUBSZ256rm, 0},
+ {X86::VCVTTPS2IUBSZrr, X86::VCVTTPS2IUBSZrm, 0},
{X86::VCVTTPS2QQZ128rr, X86::VCVTTPS2QQZ128rm, TB_NO_REVERSE},
{X86::VCVTTPS2QQZ256rr, X86::VCVTTPS2QQZ256rm, 0},
{X86::VCVTTPS2QQZrr, X86::VCVTTPS2QQZrm, 0},
@@ -2416,6 +2452,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTNE2PS2BF16Z128rr, X86::VCVTNE2PS2BF16Z128rm, 0},
{X86::VCVTNE2PS2BF16Z256rr, X86::VCVTNE2PS2BF16Z256rm, 0},
{X86::VCVTNE2PS2BF16Zrr, X86::VCVTNE2PS2BF16Zrm, 0},
+ {X86::VCVTNEBF162IBSZ128rrkz, X86::VCVTNEBF162IBSZ128rmkz, 0},
+ {X86::VCVTNEBF162IBSZ256rrkz, X86::VCVTNEBF162IBSZ256rmkz, 0},
+ {X86::VCVTNEBF162IBSZrrkz, X86::VCVTNEBF162IBSZrmkz, 0},
+ {X86::VCVTNEBF162IUBSZ128rrkz, X86::VCVTNEBF162IUBSZ128rmkz, 0},
+ {X86::VCVTNEBF162IUBSZ256rrkz, X86::VCVTNEBF162IUBSZ256rmkz, 0},
+ {X86::VCVTNEBF162IUBSZrrkz, X86::VCVTNEBF162IUBSZrmkz, 0},
{X86::VCVTNEPS2BF16Z128rrkz, X86::VCVTNEPS2BF16Z128rmkz, 0},
{X86::VCVTNEPS2BF16Z256rrkz, X86::VCVTNEPS2BF16Z256rmkz, 0},
{X86::VCVTNEPS2BF16Zrrkz, X86::VCVTNEPS2BF16Zrmkz, 0},
@@ -2440,6 +2482,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTPH2DQZ128rrkz, X86::VCVTPH2DQZ128rmkz, TB_NO_REVERSE},
{X86::VCVTPH2DQZ256rrkz, X86::VCVTPH2DQZ256rmkz, 0},
{X86::VCVTPH2DQZrrkz, X86::VCVTPH2DQZrmkz, 0},
+ {X86::VCVTPH2IBSZ128rrkz, X86::VCVTPH2IBSZ128rmkz, 0},
+ {X86::VCVTPH2IBSZ256rrkz, X86::VCVTPH2IBSZ256rmkz, 0},
+ {X86::VCVTPH2IBSZrrkz, X86::VCVTPH2IBSZrmkz, 0},
+ {X86::VCVTPH2IUBSZ128rrkz, X86::VCVTPH2IUBSZ128rmkz, 0},
+ {X86::VCVTPH2IUBSZ256rrkz, X86::VCVTPH2IUBSZ256rmkz, 0},
+ {X86::VCVTPH2IUBSZrrkz, X86::VCVTPH2IUBSZrmkz, 0},
{X86::VCVTPH2PDZ128rrkz, X86::VCVTPH2PDZ128rmkz, TB_NO_REVERSE},
{X86::VCVTPH2PDZ256rrkz, X86::VCVTPH2PDZ256rmkz, TB_NO_REVERSE},
{X86::VCVTPH2PDZrrkz, X86::VCVTPH2PDZrmkz, 0},
@@ -2467,6 +2515,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTPS2DQZ128rrkz, X86::VCVTPS2DQZ128rmkz, 0},
{X86::VCVTPS2DQZ256rrkz, X86::VCVTPS2DQZ256rmkz, 0},
{X86::VCVTPS2DQZrrkz, X86::VCVTPS2DQZrmkz, 0},
+ {X86::VCVTPS2IBSZ128rrkz, X86::VCVTPS2IBSZ128rmkz, 0},
+ {X86::VCVTPS2IBSZ256rrkz, X86::VCVTPS2IBSZ256rmkz, 0},
+ {X86::VCVTPS2IBSZrrkz, X86::VCVTPS2IBSZrmkz, 0},
+ {X86::VCVTPS2IUBSZ128rrkz, X86::VCVTPS2IUBSZ128rmkz, 0},
+ {X86::VCVTPS2IUBSZ256rrkz, X86::VCVTPS2IUBSZ256rmkz, 0},
+ {X86::VCVTPS2IUBSZrrkz, X86::VCVTPS2IUBSZrmkz, 0},
{X86::VCVTPS2PDZ128rrkz, X86::VCVTPS2PDZ128rmkz, TB_NO_REVERSE},
{X86::VCVTPS2PDZ256rrkz, X86::VCVTPS2PDZ256rmkz, 0},
{X86::VCVTPS2PDZrrkz, X86::VCVTPS2PDZrmkz, 0},
@@ -2527,6 +2581,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTSS2SDrr_Int, X86::VCVTSS2SDrm_Int, TB_NO_REVERSE},
{X86::VCVTSS2SHZrr, X86::VCVTSS2SHZrm, 0},
{X86::VCVTSS2SHZrr_Int, X86::VCVTSS2SHZrm_Int, TB_NO_REVERSE},
+ {X86::VCVTTNEBF162IBSZ128rrkz, X86::VCVTTNEBF162IBSZ128rmkz, 0},
+ {X86::VCVTTNEBF162IBSZ256rrkz, X86::VCVTTNEBF162IBSZ256rmkz, 0},
+ {X86::VCVTTNEBF162IBSZrrkz, X86::VCVTTNEBF162IBSZrmkz, 0},
+ {X86::VCVTTNEBF162IUBSZ128rrkz, X86::VCVTTNEBF162IUBSZ128rmkz, 0},
+ {X86::VCVTTNEBF162IUBSZ256rrkz, X86::VCVTTNEBF162IUBSZ256rmkz, 0},
+ {X86::VCVTTNEBF162IUBSZrrkz, X86::VCVTTNEBF162IUBSZrmkz, 0},
{X86::VCVTTPD2DQZ128rrkz, X86::VCVTTPD2DQZ128rmkz, 0},
{X86::VCVTTPD2DQZ256rrkz, X86::VCVTTPD2DQZ256rmkz, 0},
{X86::VCVTTPD2DQZrrkz, X86::VCVTTPD2DQZrmkz, 0},
@@ -2542,6 +2602,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTTPH2DQZ128rrkz, X86::VCVTTPH2DQZ128rmkz, TB_NO_REVERSE},
{X86::VCVTTPH2DQZ256rrkz, X86::VCVTTPH2DQZ256rmkz, 0},
{X86::VCVTTPH2DQZrrkz, X86::VCVTTPH2DQZrmkz, 0},
+ {X86::VCVTTPH2IBSZ128rrkz, X86::VCVTTPH2IBSZ128rmkz, 0},
+ {X86::VCVTTPH2IBSZ256rrkz, X86::VCVTTPH2IBSZ256rmkz, 0},
+ {X86::VCVTTPH2IBSZrrkz, X86::VCVTTPH2IBSZrmkz, 0},
+ {X86::VCVTTPH2IUBSZ128rrkz, X86::VCVTTPH2IUBSZ128rmkz, 0},
+ {X86::VCVTTPH2IUBSZ256rrkz, X86::VCVTTPH2IUBSZ256rmkz, 0},
+ {X86::VCVTTPH2IUBSZrrkz, X86::VCVTTPH2IUBSZrmkz, 0},
{X86::VCVTTPH2QQZ128rrkz, X86::VCVTTPH2QQZ128rmkz, TB_NO_REVERSE},
{X86::VCVTTPH2QQZ256rrkz, X86::VCVTTPH2QQZ256rmkz, TB_NO_REVERSE},
{X86::VCVTTPH2QQZrrkz, X86::VCVTTPH2QQZrmkz, 0},
@@ -2560,6 +2626,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VCVTTPS2DQZ128rrkz, X86::VCVTTPS2DQZ128rmkz, 0},
{X86::VCVTTPS2DQZ256rrkz, X86::VCVTTPS2DQZ256rmkz, 0},
{X86::VCVTTPS2DQZrrkz, X86::VCVTTPS2DQZrmkz, 0},
+ {X86::VCVTTPS2IBSZ128rrkz, X86::VCVTTPS2IBSZ128rmkz, 0},
+ {X86::VCVTTPS2IBSZ256rrkz, X86::VCVTTPS2IBSZ256rmkz, 0},
+ {X86::VCVTTPS2IBSZrrkz, X86::VCVTTPS2IBSZrmkz, 0},
+ {X86::VCVTTPS2IUBSZ128rrkz, X86::VCVTTPS2IUBSZ128rmkz, 0},
+ {X86::VCVTTPS2IUBSZ256rrkz, X86::VCVTTPS2IUBSZ256rmkz, 0},
+ {X86::VCVTTPS2IUBSZrrkz, X86::VCVTTPS2IUBSZrmkz, 0},
{X86::VCVTTPS2QQZ128rrkz, X86::VCVTTPS2QQZ128rmkz, TB_NO_REVERSE},
{X86::VCVTTPS2QQZ256rrkz, X86::VCVTTPS2QQZ256rmkz, 0},
{X86::VCVTTPS2QQZrrkz, X86::VCVTTPS2QQZrmkz, 0},
@@ -4010,6 +4082,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTNE2PS2BF16Z128rrkz, X86::VCVTNE2PS2BF16Z128rmkz, 0},
{X86::VCVTNE2PS2BF16Z256rrkz, X86::VCVTNE2PS2BF16Z256rmkz, 0},
{X86::VCVTNE2PS2BF16Zrrkz, X86::VCVTNE2PS2BF16Zrmkz, 0},
+ {X86::VCVTNEBF162IBSZ128rrk, X86::VCVTNEBF162IBSZ128rmk, 0},
+ {X86::VCVTNEBF162IBSZ256rrk, X86::VCVTNEBF162IBSZ256rmk, 0},
+ {X86::VCVTNEBF162IBSZrrk, X86::VCVTNEBF162IBSZrmk, 0},
+ {X86::VCVTNEBF162IUBSZ128rrk, X86::VCVTNEBF162IUBSZ128rmk, 0},
+ {X86::VCVTNEBF162IUBSZ256rrk, X86::VCVTNEBF162IUBSZ256rmk, 0},
+ {X86::VCVTNEBF162IUBSZrrk, X86::VCVTNEBF162IUBSZrmk, 0},
{X86::VCVTNEPS2BF16Z128rrk, X86::VCVTNEPS2BF16Z128rmk, 0},
{X86::VCVTNEPS2BF16Z256rrk, X86::VCVTNEPS2BF16Z256rmk, 0},
{X86::VCVTNEPS2BF16Zrrk, X86::VCVTNEPS2BF16Zrmk, 0},
@@ -4034,6 +4112,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTPH2DQZ128rrk, X86::VCVTPH2DQZ128rmk, TB_NO_REVERSE},
{X86::VCVTPH2DQZ256rrk, X86::VCVTPH2DQZ256rmk, 0},
{X86::VCVTPH2DQZrrk, X86::VCVTPH2DQZrmk, 0},
+ {X86::VCVTPH2IBSZ128rrk, X86::VCVTPH2IBSZ128rmk, 0},
+ {X86::VCVTPH2IBSZ256rrk, X86::VCVTPH2IBSZ256rmk, 0},
+ {X86::VCVTPH2IBSZrrk, X86::VCVTPH2IBSZrmk, 0},
+ {X86::VCVTPH2IUBSZ128rrk, X86::VCVTPH2IUBSZ128rmk, 0},
+ {X86::VCVTPH2IUBSZ256rrk, X86::VCVTPH2IUBSZ256rmk, 0},
+ {X86::VCVTPH2IUBSZrrk, X86::VCVTPH2IUBSZrmk, 0},
{X86::VCVTPH2PDZ128rrk, X86::VCVTPH2PDZ128rmk, TB_NO_REVERSE},
{X86::VCVTPH2PDZ256rrk, X86::VCVTPH2PDZ256rmk, TB_NO_REVERSE},
{X86::VCVTPH2PDZrrk, X86::VCVTPH2PDZrmk, 0},
@@ -4061,6 +4145,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTPS2DQZ128rrk, X86::VCVTPS2DQZ128rmk, 0},
{X86::VCVTPS2DQZ256rrk, X86::VCVTPS2DQZ256rmk, 0},
{X86::VCVTPS2DQZrrk, X86::VCVTPS2DQZrmk, 0},
+ {X86::VCVTPS2IBSZ128rrk, X86::VCVTPS2IBSZ128rmk, 0},
+ {X86::VCVTPS2IBSZ256rrk, X86::VCVTPS2IBSZ256rmk, 0},
+ {X86::VCVTPS2IBSZrrk, X86::VCVTPS2IBSZrmk, 0},
+ {X86::VCVTPS2IUBSZ128rrk, X86::VCVTPS2IUBSZ128rmk, 0},
+ {X86::VCVTPS2IUBSZ256rrk, X86::VCVTPS2IUBSZ256rmk, 0},
+ {X86::VCVTPS2IUBSZrrk, X86::VCVTPS2IUBSZrmk, 0},
{X86::VCVTPS2PDZ128rrk, X86::VCVTPS2PDZ128rmk, TB_NO_REVERSE},
{X86::VCVTPS2PDZ256rrk, X86::VCVTPS2PDZ256rmk, 0},
{X86::VCVTPS2PDZrrk, X86::VCVTPS2PDZrmk, 0},
@@ -4091,6 +4181,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTSH2SSZrr_Intkz, X86::VCVTSH2SSZrm_Intkz, TB_NO_REVERSE},
{X86::VCVTSS2SDZrr_Intkz, X86::VCVTSS2SDZrm_Intkz, TB_NO_REVERSE},
{X86::VCVTSS2SHZrr_Intkz, X86::VCVTSS2SHZrm_Intkz, TB_NO_REVERSE},
+ {X86::VCVTTNEBF162IBSZ128rrk, X86::VCVTTNEBF162IBSZ128rmk, 0},
+ {X86::VCVTTNEBF162IBSZ256rrk, X86::VCVTTNEBF162IBSZ256rmk, 0},
+ {X86::VCVTTNEBF162IBSZrrk, X86::VCVTTNEBF162IBSZrmk, 0},
+ {X86::VCVTTNEBF162IUBSZ128rrk, X86::VCVTTNEBF162IUBSZ128rmk, 0},
+ {X86::VCVTTNEBF162IUBSZ256rrk, X86::VCVTTNEBF162IUBSZ256rmk, 0},
+ {X86::VCVTTNEBF162IUBSZrrk, X86::VCVTTNEBF162IUBSZrmk, 0},
{X86::VCVTTPD2DQZ128rrk, X86::VCVTTPD2DQZ128rmk, 0},
{X86::VCVTTPD2DQZ256rrk, X86::VCVTTPD2DQZ256rmk, 0},
{X86::VCVTTPD2DQZrrk, X86::VCVTTPD2DQZrmk, 0},
@@ -4106,6 +4202,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTTPH2DQZ128rrk, X86::VCVTTPH2DQZ128rmk, TB_NO_REVERSE},
{X86::VCVTTPH2DQZ256rrk, X86::VCVTTPH2DQZ256rmk, 0},
{X86::VCVTTPH2DQZrrk, X86::VCVTTPH2DQZrmk, 0},
+ {X86::VCVTTPH2IBSZ128rrk, X86::VCVTTPH2IBSZ128rmk, 0},
+ {X86::VCVTTPH2IBSZ256rrk, X86::VCVTTPH2IBSZ256rmk, 0},
+ {X86::VCVTTPH2IBSZrrk, X86::VCVTTPH2IBSZrmk, 0},
+ {X86::VCVTTPH2IUBSZ128rrk, X86::VCVTTPH2IUBSZ128rmk, 0},
+ {X86::VCVTTPH2IUBSZ256rrk, X86::VCVTTPH2IUBSZ256rmk, 0},
+ {X86::VCVTTPH2IUBSZrrk, X86::VCVTTPH2IUBSZrmk, 0},
{X86::VCVTTPH2QQZ128rrk, X86::VCVTTPH2QQZ128rmk, TB_NO_REVERSE},
{X86::VCVTTPH2QQZ256rrk, X86::VCVTTPH2QQZ256rmk, TB_NO_REVERSE},
{X86::VCVTTPH2QQZrrk, X86::VCVTTPH2QQZrmk, 0},
@@ -4124,6 +4226,12 @@ static const X86FoldTableEntry Table3[] = {
{X86::VCVTTPS2DQZ128rrk, X86::VCVTTPS2DQZ128rmk, 0},
{X86::VCVTTPS2DQZ256rrk, X86::VCVTTPS2DQZ256rmk, 0},
{X86::VCVTTPS2DQZrrk, X86::VCVTTPS2DQZrmk, 0},
+ {X86::VCVTTPS2IBSZ128rrk, X86::VCVTTPS2IBSZ128rmk, 0},
+ {X86::VCVTTPS2IBSZ256rrk, X86::VCVTTPS2IBSZ256rmk, 0},
+ {X86::VCVTTPS2IBSZrrk, X86::VCVTTPS2IBSZrmk, 0},
+ {X86::VCVTTPS2IUBSZ128rrk, X86::VCVTTPS2IUBSZ128rmk, 0},
+ {X86::VCVTTPS2IUBSZ256rrk, X86::VCVTTPS2IUBSZ256rmk, 0},
+ {X86::VCVTTPS2IUBSZrrk, X86::VCVTTPS2IUBSZrmk, 0},
{X86::VCVTTPS2QQZ128rrk, X86::VCVTTPS2QQZ128rmk, TB_NO_REVERSE},
{X86::VCVTTPS2QQZ256rrk, X86::VCVTTPS2QQZ256rmk, 0},
{X86::VCVTTPS2QQZrrk, X86::VCVTTPS2QQZrmk, 0},
@@ -6842,6 +6950,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTDQ2PSZ128rr, X86::VCVTDQ2PSZ128rmb, TB_BCAST_D},
{X86::VCVTDQ2PSZ256rr, X86::VCVTDQ2PSZ256rmb, TB_BCAST_D},
{X86::VCVTDQ2PSZrr, X86::VCVTDQ2PSZrmb, TB_BCAST_D},
+ {X86::VCVTNEBF162IBSZ128rr, X86::VCVTNEBF162IBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZ256rr, X86::VCVTNEBF162IBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZrr, X86::VCVTNEBF162IBSZrmb, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ128rr, X86::VCVTNEBF162IUBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ256rr, X86::VCVTNEBF162IUBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZrr, X86::VCVTNEBF162IUBSZrmb, TB_BCAST_SH},
{X86::VCVTNEPS2BF16Z128rr, X86::VCVTNEPS2BF16Z128rmb, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Z256rr, X86::VCVTNEPS2BF16Z256rmb, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Zrr, X86::VCVTNEPS2BF16Zrmb, TB_BCAST_SS},
@@ -6866,6 +6980,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTPH2DQZ128rr, X86::VCVTPH2DQZ128rmb, TB_BCAST_SH},
{X86::VCVTPH2DQZ256rr, X86::VCVTPH2DQZ256rmb, TB_BCAST_SH},
{X86::VCVTPH2DQZrr, X86::VCVTPH2DQZrmb, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ128rr, X86::VCVTPH2IBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ256rr, X86::VCVTPH2IBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZrr, X86::VCVTPH2IBSZrmb, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ128rr, X86::VCVTPH2IUBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ256rr, X86::VCVTPH2IUBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZrr, X86::VCVTPH2IUBSZrmb, TB_BCAST_SH},
{X86::VCVTPH2PDZ128rr, X86::VCVTPH2PDZ128rmb, TB_BCAST_SH},
{X86::VCVTPH2PDZ256rr, X86::VCVTPH2PDZ256rmb, TB_BCAST_SH},
{X86::VCVTPH2PDZrr, X86::VCVTPH2PDZrmb, TB_BCAST_SH},
@@ -6890,6 +7010,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTPS2DQZ128rr, X86::VCVTPS2DQZ128rmb, TB_BCAST_SS},
{X86::VCVTPS2DQZ256rr, X86::VCVTPS2DQZ256rmb, TB_BCAST_SS},
{X86::VCVTPS2DQZrr, X86::VCVTPS2DQZrmb, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ128rr, X86::VCVTPS2IBSZ128rmb, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ256rr, X86::VCVTPS2IBSZ256rmb, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZrr, X86::VCVTPS2IBSZrmb, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ128rr, X86::VCVTPS2IUBSZ128rmb, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ256rr, X86::VCVTPS2IUBSZ256rmb, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZrr, X86::VCVTPS2IUBSZrmb, TB_BCAST_SS},
{X86::VCVTPS2PDZ128rr, X86::VCVTPS2PDZ128rmb, TB_BCAST_SS},
{X86::VCVTPS2PDZ256rr, X86::VCVTPS2PDZ256rmb, TB_BCAST_SS},
{X86::VCVTPS2PDZrr, X86::VCVTPS2PDZrmb, TB_BCAST_SS},
@@ -6914,6 +7040,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTQQ2PSZ128rr, X86::VCVTQQ2PSZ128rmb, TB_BCAST_Q},
{X86::VCVTQQ2PSZ256rr, X86::VCVTQQ2PSZ256rmb, TB_BCAST_Q},
{X86::VCVTQQ2PSZrr, X86::VCVTQQ2PSZrmb, TB_BCAST_Q},
+ {X86::VCVTTNEBF162IBSZ128rr, X86::VCVTTNEBF162IBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZ256rr, X86::VCVTTNEBF162IBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZrr, X86::VCVTTNEBF162IBSZrmb, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ128rr, X86::VCVTTNEBF162IUBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ256rr, X86::VCVTTNEBF162IUBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZrr, X86::VCVTTNEBF162IUBSZrmb, TB_BCAST_SH},
{X86::VCVTTPD2DQZ128rr, X86::VCVTTPD2DQZ128rmb, TB_BCAST_SD},
{X86::VCVTTPD2DQZ256rr, X86::VCVTTPD2DQZ256rmb, TB_BCAST_SD},
{X86::VCVTTPD2DQZrr, X86::VCVTTPD2DQZrmb, TB_BCAST_SD},
@@ -6929,6 +7061,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTTPH2DQZ128rr, X86::VCVTTPH2DQZ128rmb, TB_BCAST_SH},
{X86::VCVTTPH2DQZ256rr, X86::VCVTTPH2DQZ256rmb, TB_BCAST_SH},
{X86::VCVTTPH2DQZrr, X86::VCVTTPH2DQZrmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ128rr, X86::VCVTTPH2IBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ256rr, X86::VCVTTPH2IBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZrr, X86::VCVTTPH2IBSZrmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ128rr, X86::VCVTTPH2IUBSZ128rmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ256rr, X86::VCVTTPH2IUBSZ256rmb, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZrr, X86::VCVTTPH2IUBSZrmb, TB_BCAST_SH},
{X86::VCVTTPH2QQZ128rr, X86::VCVTTPH2QQZ128rmb, TB_BCAST_SH},
{X86::VCVTTPH2QQZ256rr, X86::VCVTTPH2QQZ256rmb, TB_BCAST_SH},
{X86::VCVTTPH2QQZrr, X86::VCVTTPH2QQZrmb, TB_BCAST_SH},
@@ -6947,6 +7085,12 @@ static const X86FoldTableEntry BroadcastTable1[] = {
{X86::VCVTTPS2DQZ128rr, X86::VCVTTPS2DQZ128rmb, TB_BCAST_SS},
{X86::VCVTTPS2DQZ256rr, X86::VCVTTPS2DQZ256rmb, TB_BCAST_SS},
{X86::VCVTTPS2DQZrr, X86::VCVTTPS2DQZrmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ128rr, X86::VCVTTPS2IBSZ128rmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ256rr, X86::VCVTTPS2IBSZ256rmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZrr, X86::VCVTTPS2IBSZrmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ128rr, X86::VCVTTPS2IUBSZ128rmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ256rr, X86::VCVTTPS2IUBSZ256rmb, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZrr, X86::VCVTTPS2IUBSZrmb, TB_BCAST_SS},
{X86::VCVTTPS2QQZ128rr, X86::VCVTTPS2QQZ128rmb, TB_BCAST_SS},
{X86::VCVTTPS2QQZ256rr, X86::VCVTTPS2QQZ256rmb, TB_BCAST_SS},
{X86::VCVTTPS2QQZrr, X86::VCVTTPS2QQZrmb, TB_BCAST_SS},
@@ -7182,6 +7326,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTNE2PS2BF16Z128rr, X86::VCVTNE2PS2BF16Z128rmb, TB_BCAST_SS},
{X86::VCVTNE2PS2BF16Z256rr, X86::VCVTNE2PS2BF16Z256rmb, TB_BCAST_SS},
{X86::VCVTNE2PS2BF16Zrr, X86::VCVTNE2PS2BF16Zrmb, TB_BCAST_SS},
+ {X86::VCVTNEBF162IBSZ128rrkz, X86::VCVTNEBF162IBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZ256rrkz, X86::VCVTNEBF162IBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZrrkz, X86::VCVTNEBF162IBSZrmbkz, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ128rrkz, X86::VCVTNEBF162IUBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ256rrkz, X86::VCVTNEBF162IUBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZrrkz, X86::VCVTNEBF162IUBSZrmbkz, TB_BCAST_SH},
{X86::VCVTNEPS2BF16Z128rrkz, X86::VCVTNEPS2BF16Z128rmbkz, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Z256rrkz, X86::VCVTNEPS2BF16Z256rmbkz, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Zrrkz, X86::VCVTNEPS2BF16Zrmbkz, TB_BCAST_SS},
@@ -7206,6 +7356,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTPH2DQZ128rrkz, X86::VCVTPH2DQZ128rmbkz, TB_BCAST_SH},
{X86::VCVTPH2DQZ256rrkz, X86::VCVTPH2DQZ256rmbkz, TB_BCAST_SH},
{X86::VCVTPH2DQZrrkz, X86::VCVTPH2DQZrmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ128rrkz, X86::VCVTPH2IBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ256rrkz, X86::VCVTPH2IBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZrrkz, X86::VCVTPH2IBSZrmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ128rrkz, X86::VCVTPH2IUBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ256rrkz, X86::VCVTPH2IUBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZrrkz, X86::VCVTPH2IUBSZrmbkz, TB_BCAST_SH},
{X86::VCVTPH2PDZ128rrkz, X86::VCVTPH2PDZ128rmbkz, TB_BCAST_SH},
{X86::VCVTPH2PDZ256rrkz, X86::VCVTPH2PDZ256rmbkz, TB_BCAST_SH},
{X86::VCVTPH2PDZrrkz, X86::VCVTPH2PDZrmbkz, TB_BCAST_SH},
@@ -7230,6 +7386,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTPS2DQZ128rrkz, X86::VCVTPS2DQZ128rmbkz, TB_BCAST_SS},
{X86::VCVTPS2DQZ256rrkz, X86::VCVTPS2DQZ256rmbkz, TB_BCAST_SS},
{X86::VCVTPS2DQZrrkz, X86::VCVTPS2DQZrmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ128rrkz, X86::VCVTPS2IBSZ128rmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ256rrkz, X86::VCVTPS2IBSZ256rmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZrrkz, X86::VCVTPS2IBSZrmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ128rrkz, X86::VCVTPS2IUBSZ128rmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ256rrkz, X86::VCVTPS2IUBSZ256rmbkz, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZrrkz, X86::VCVTPS2IUBSZrmbkz, TB_BCAST_SS},
{X86::VCVTPS2PDZ128rrkz, X86::VCVTPS2PDZ128rmbkz, TB_BCAST_SS},
{X86::VCVTPS2PDZ256rrkz, X86::VCVTPS2PDZ256rmbkz, TB_BCAST_SS},
{X86::VCVTPS2PDZrrkz, X86::VCVTPS2PDZrmbkz, TB_BCAST_SS},
@@ -7254,6 +7416,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTQQ2PSZ128rrkz, X86::VCVTQQ2PSZ128rmbkz, TB_BCAST_Q},
{X86::VCVTQQ2PSZ256rrkz, X86::VCVTQQ2PSZ256rmbkz, TB_BCAST_Q},
{X86::VCVTQQ2PSZrrkz, X86::VCVTQQ2PSZrmbkz, TB_BCAST_Q},
+ {X86::VCVTTNEBF162IBSZ128rrkz, X86::VCVTTNEBF162IBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZ256rrkz, X86::VCVTTNEBF162IBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZrrkz, X86::VCVTTNEBF162IBSZrmbkz, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ128rrkz, X86::VCVTTNEBF162IUBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ256rrkz, X86::VCVTTNEBF162IUBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZrrkz, X86::VCVTTNEBF162IUBSZrmbkz, TB_BCAST_SH},
{X86::VCVTTPD2DQZ128rrkz, X86::VCVTTPD2DQZ128rmbkz, TB_BCAST_SD},
{X86::VCVTTPD2DQZ256rrkz, X86::VCVTTPD2DQZ256rmbkz, TB_BCAST_SD},
{X86::VCVTTPD2DQZrrkz, X86::VCVTTPD2DQZrmbkz, TB_BCAST_SD},
@@ -7269,6 +7437,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTTPH2DQZ128rrkz, X86::VCVTTPH2DQZ128rmbkz, TB_BCAST_SH},
{X86::VCVTTPH2DQZ256rrkz, X86::VCVTTPH2DQZ256rmbkz, TB_BCAST_SH},
{X86::VCVTTPH2DQZrrkz, X86::VCVTTPH2DQZrmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ128rrkz, X86::VCVTTPH2IBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ256rrkz, X86::VCVTTPH2IBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZrrkz, X86::VCVTTPH2IBSZrmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ128rrkz, X86::VCVTTPH2IUBSZ128rmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ256rrkz, X86::VCVTTPH2IUBSZ256rmbkz, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZrrkz, X86::VCVTTPH2IUBSZrmbkz, TB_BCAST_SH},
{X86::VCVTTPH2QQZ128rrkz, X86::VCVTTPH2QQZ128rmbkz, TB_BCAST_SH},
{X86::VCVTTPH2QQZ256rrkz, X86::VCVTTPH2QQZ256rmbkz, TB_BCAST_SH},
{X86::VCVTTPH2QQZrrkz, X86::VCVTTPH2QQZrmbkz, TB_BCAST_SH},
@@ -7287,6 +7461,12 @@ static const X86FoldTableEntry BroadcastTable2[] = {
{X86::VCVTTPS2DQZ128rrkz, X86::VCVTTPS2DQZ128rmbkz, TB_BCAST_SS},
{X86::VCVTTPS2DQZ256rrkz, X86::VCVTTPS2DQZ256rmbkz, TB_BCAST_SS},
{X86::VCVTTPS2DQZrrkz, X86::VCVTTPS2DQZrmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ128rrkz, X86::VCVTTPS2IBSZ128rmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ256rrkz, X86::VCVTTPS2IBSZ256rmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZrrkz, X86::VCVTTPS2IBSZrmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ128rrkz, X86::VCVTTPS2IUBSZ128rmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ256rrkz, X86::VCVTTPS2IUBSZ256rmbkz, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZrrkz, X86::VCVTTPS2IUBSZrmbkz, TB_BCAST_SS},
{X86::VCVTTPS2QQZ128rrkz, X86::VCVTTPS2QQZ128rmbkz, TB_BCAST_SS},
{X86::VCVTTPS2QQZ256rrkz, X86::VCVTTPS2QQZ256rmbkz, TB_BCAST_SS},
{X86::VCVTTPS2QQZrrkz, X86::VCVTTPS2QQZrmbkz, TB_BCAST_SS},
@@ -7859,6 +8039,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTNE2PS2BF16Z128rrkz, X86::VCVTNE2PS2BF16Z128rmbkz, TB_BCAST_SS},
{X86::VCVTNE2PS2BF16Z256rrkz, X86::VCVTNE2PS2BF16Z256rmbkz, TB_BCAST_SS},
{X86::VCVTNE2PS2BF16Zrrkz, X86::VCVTNE2PS2BF16Zrmbkz, TB_BCAST_SS},
+ {X86::VCVTNEBF162IBSZ128rrk, X86::VCVTNEBF162IBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZ256rrk, X86::VCVTNEBF162IBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTNEBF162IBSZrrk, X86::VCVTNEBF162IBSZrmbk, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ128rrk, X86::VCVTNEBF162IUBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZ256rrk, X86::VCVTNEBF162IUBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTNEBF162IUBSZrrk, X86::VCVTNEBF162IUBSZrmbk, TB_BCAST_SH},
{X86::VCVTNEPS2BF16Z128rrk, X86::VCVTNEPS2BF16Z128rmbk, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Z256rrk, X86::VCVTNEPS2BF16Z256rmbk, TB_BCAST_SS},
{X86::VCVTNEPS2BF16Zrrk, X86::VCVTNEPS2BF16Zrmbk, TB_BCAST_SS},
@@ -7883,6 +8069,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTPH2DQZ128rrk, X86::VCVTPH2DQZ128rmbk, TB_BCAST_SH},
{X86::VCVTPH2DQZ256rrk, X86::VCVTPH2DQZ256rmbk, TB_BCAST_SH},
{X86::VCVTPH2DQZrrk, X86::VCVTPH2DQZrmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ128rrk, X86::VCVTPH2IBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZ256rrk, X86::VCVTPH2IBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IBSZrrk, X86::VCVTPH2IBSZrmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ128rrk, X86::VCVTPH2IUBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZ256rrk, X86::VCVTPH2IUBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTPH2IUBSZrrk, X86::VCVTPH2IUBSZrmbk, TB_BCAST_SH},
{X86::VCVTPH2PDZ128rrk, X86::VCVTPH2PDZ128rmbk, TB_BCAST_SH},
{X86::VCVTPH2PDZ256rrk, X86::VCVTPH2PDZ256rmbk, TB_BCAST_SH},
{X86::VCVTPH2PDZrrk, X86::VCVTPH2PDZrmbk, TB_BCAST_SH},
@@ -7907,6 +8099,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTPS2DQZ128rrk, X86::VCVTPS2DQZ128rmbk, TB_BCAST_SS},
{X86::VCVTPS2DQZ256rrk, X86::VCVTPS2DQZ256rmbk, TB_BCAST_SS},
{X86::VCVTPS2DQZrrk, X86::VCVTPS2DQZrmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ128rrk, X86::VCVTPS2IBSZ128rmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZ256rrk, X86::VCVTPS2IBSZ256rmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IBSZrrk, X86::VCVTPS2IBSZrmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ128rrk, X86::VCVTPS2IUBSZ128rmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZ256rrk, X86::VCVTPS2IUBSZ256rmbk, TB_BCAST_SS},
+ {X86::VCVTPS2IUBSZrrk, X86::VCVTPS2IUBSZrmbk, TB_BCAST_SS},
{X86::VCVTPS2PDZ128rrk, X86::VCVTPS2PDZ128rmbk, TB_BCAST_SS},
{X86::VCVTPS2PDZ256rrk, X86::VCVTPS2PDZ256rmbk, TB_BCAST_SS},
{X86::VCVTPS2PDZrrk, X86::VCVTPS2PDZrmbk, TB_BCAST_SS},
@@ -7931,6 +8129,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTQQ2PSZ128rrk, X86::VCVTQQ2PSZ128rmbk, TB_BCAST_Q},
{X86::VCVTQQ2PSZ256rrk, X86::VCVTQQ2PSZ256rmbk, TB_BCAST_Q},
{X86::VCVTQQ2PSZrrk, X86::VCVTQQ2PSZrmbk, TB_BCAST_Q},
+ {X86::VCVTTNEBF162IBSZ128rrk, X86::VCVTTNEBF162IBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZ256rrk, X86::VCVTTNEBF162IBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IBSZrrk, X86::VCVTTNEBF162IBSZrmbk, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ128rrk, X86::VCVTTNEBF162IUBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZ256rrk, X86::VCVTTNEBF162IUBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTTNEBF162IUBSZrrk, X86::VCVTTNEBF162IUBSZrmbk, TB_BCAST_SH},
{X86::VCVTTPD2DQZ128rrk, X86::VCVTTPD2DQZ128rmbk, TB_BCAST_SD},
{X86::VCVTTPD2DQZ256rrk, X86::VCVTTPD2DQZ256rmbk, TB_BCAST_SD},
{X86::VCVTTPD2DQZrrk, X86::VCVTTPD2DQZrmbk, TB_BCAST_SD},
@@ -7946,6 +8150,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTTPH2DQZ128rrk, X86::VCVTTPH2DQZ128rmbk, TB_BCAST_SH},
{X86::VCVTTPH2DQZ256rrk, X86::VCVTTPH2DQZ256rmbk, TB_BCAST_SH},
{X86::VCVTTPH2DQZrrk, X86::VCVTTPH2DQZrmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ128rrk, X86::VCVTTPH2IBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZ256rrk, X86::VCVTTPH2IBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IBSZrrk, X86::VCVTTPH2IBSZrmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ128rrk, X86::VCVTTPH2IUBSZ128rmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZ256rrk, X86::VCVTTPH2IUBSZ256rmbk, TB_BCAST_SH},
+ {X86::VCVTTPH2IUBSZrrk, X86::VCVTTPH2IUBSZrmbk, TB_BCAST_SH},
{X86::VCVTTPH2QQZ128rrk, X86::VCVTTPH2QQZ128rmbk, TB_BCAST_SH},
{X86::VCVTTPH2QQZ256rrk, X86::VCVTTPH2QQZ256rmbk, TB_BCAST_SH},
{X86::VCVTTPH2QQZrrk, X86::VCVTTPH2QQZrmbk, TB_BCAST_SH},
@@ -7964,6 +8174,12 @@ static const X86FoldTableEntry BroadcastTable3[] = {
{X86::VCVTTPS2DQZ128rrk, X86::VCVTTPS2DQZ128rmbk, TB_BCAST_SS},
{X86::VCVTTPS2DQZ256rrk, X86::VCVTTPS2DQZ256rmbk, TB_BCAST_SS},
{X86::VCVTTPS2DQZrrk, X86::VCVTTPS2DQZrmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ128rrk, X86::VCVTTPS2IBSZ128rmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZ256rrk, X86::VCVTTPS2IBSZ256rmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IBSZrrk, X86::VCVTTPS2IBSZrmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ128rrk, X86::VCVTTPS2IUBSZ128rmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZ256rrk, X86::VCVTTPS2IUBSZ256rmbk, TB_BCAST_SS},
+ {X86::VCVTTPS2IUBSZrrk, X86::VCVTTPS2IUBSZrmbk, TB_BCAST_SS},
{X86::VCVTTPS2QQZ128rrk, X86::VCVTTPS2QQZ128rmbk, TB_BCAST_SS},
{X86::VCVTTPS2QQZ256rrk, X86::VCVTTPS2QQZ256rmbk, TB_BCAST_SS},
{X86::VCVTTPS2QQZrrk, X86::VCVTTPS2QQZrmbk, TB_BCAST_SS},
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
index 2a5b3e9..9143d44 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
@@ -451,15 +451,15 @@ define i32 @malloc_in_loop(i32 %arg) {
; CHECK-LABEL: define {{[^@]+}}@malloc_in_loop
; CHECK-SAME: (i32 [[ARG:%.*]]) {
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8
-; CHECK-NEXT: store i32 [[ARG]], ptr [[I]], align 4
+; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
+; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8, addrspace(5)
+; CHECK-NEXT: store i32 [[ARG]], ptr addrspace(5) [[I]], align 4
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[I3:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT: [[I3:%.*]] = load i32, ptr addrspace(5) [[I]], align 4
; CHECK-NEXT: [[I4:%.*]] = add nsw i32 [[I3]], -1
-; CHECK-NEXT: store i32 [[I4]], ptr [[I]], align 4
+; CHECK-NEXT: store i32 [[I4]], ptr addrspace(5) [[I]], align 4
; CHECK-NEXT: [[I5:%.*]] = icmp sgt i32 [[I4]], 0
; CHECK-NEXT: br i1 [[I5]], label [[BB6:%.*]], label [[BB9:%.*]]
; CHECK: bb6:
@@ -469,15 +469,15 @@ define i32 @malloc_in_loop(i32 %arg) {
; CHECK-NEXT: ret i32 5
;
bb:
- %i = alloca i32, align 4
- %i1 = alloca ptr, align 8
- store i32 %arg, ptr %i, align 4
+ %i = alloca i32, align 4, addrspace(5)
+ %i1 = alloca ptr, align 8, addrspace(5)
+ store i32 %arg, ptr addrspace(5) %i, align 4
br label %bb2
bb2:
- %i3 = load i32, ptr %i, align 4
+ %i3 = load i32, ptr addrspace(5) %i, align 4
%i4 = add nsw i32 %i3, -1
- store i32 %i4, ptr %i, align 4
+ store i32 %i4, ptr addrspace(5) %i, align 4
%i5 = icmp sgt i32 %i4, 0
br i1 %i5, label %bb6, label %bb9
diff --git a/llvm/test/Transforms/Attributor/value-simplify-gpu.ll b/llvm/test/Transforms/Attributor/value-simplify-gpu.ll
index 04ba6e2..dc2d92fa 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-gpu.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-gpu.ll
@@ -205,8 +205,9 @@ define internal void @level1(i32 %C) {
; TUNIT-LABEL: define {{[^@]+}}@level1
; TUNIT-SAME: (i32 [[C:%.*]]) #[[ATTR1]] {
; TUNIT-NEXT: entry:
-; TUNIT-NEXT: [[LOCAL:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @level2all_early(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
+; TUNIT-NEXT: [[LOCAL_ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
+; TUNIT-NEXT: [[LOCAL:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_ALLOCA]] to ptr
+; TUNIT-NEXT: call void @level2all_early(ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
; TUNIT-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[C]], 0
; TUNIT-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; TUNIT: if.then:
@@ -216,29 +217,31 @@ define internal void @level1(i32 %C) {
; TUNIT-NEXT: call void @level2b() #[[ATTR5]]
; TUNIT-NEXT: br label [[IF_END]]
; TUNIT: if.end:
-; TUNIT-NEXT: call void @level2all_late(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR6]]
+; TUNIT-NEXT: call void @level2all_late(ptr nocapture nofree noundef writeonly align 4 dereferenceable_or_null(4) [[LOCAL]]) #[[ATTR6]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: norecurse nosync nounwind
; CGSCC-LABEL: define {{[^@]+}}@level1
; CGSCC-SAME: (i32 [[C:%.*]]) #[[ATTR1]] {
; CGSCC-NEXT: entry:
-; CGSCC-NEXT: [[LOCAL:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @level2all_early(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR5]]
+; CGSCC-NEXT: [[LOCAL_ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
+; CGSCC-NEXT: [[LOCAL:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_ALLOCA]] to ptr
+; CGSCC-NEXT: call void @level2all_early(ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR5]]
; CGSCC-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[C]], 0
; CGSCC-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CGSCC: if.then:
-; CGSCC-NEXT: call void @level2a(ptr noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
+; CGSCC-NEXT: call void @level2a(ptr nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
; CGSCC-NEXT: br label [[IF_END:%.*]]
; CGSCC: if.else:
-; CGSCC-NEXT: call void @level2b(ptr noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
+; CGSCC-NEXT: call void @level2b(ptr nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR4]]
; CGSCC-NEXT: br label [[IF_END]]
; CGSCC: if.end:
-; CGSCC-NEXT: call void @level2all_late(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR6]]
+; CGSCC-NEXT: call void @level2all_late(ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[LOCAL]]) #[[ATTR6]]
; CGSCC-NEXT: ret void
;
entry:
- %local = alloca i32
+ %local.alloca = alloca i32, addrspace(5)
+ %local = addrspacecast ptr addrspace(5) %local.alloca to ptr
call void @level2all_early(ptr %local)
%tobool = icmp ne i32 %C, 0
br i1 %tobool, label %if.then, label %if.else
@@ -259,9 +262,10 @@ if.end: ; preds = %if.else, %if.then
define internal void @level2all_early(ptr %addr) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
; TUNIT-LABEL: define {{[^@]+}}@level2all_early
-; TUNIT-SAME: (ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: store i32 1, ptr addrspace(3) @ReachableNonKernel, align 4
+; TUNIT-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[ADDR]] to ptr addrspace(5)
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
@@ -337,14 +341,15 @@ entry:
define internal void @level2all_late(ptr %addr) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
; TUNIT-LABEL: define {{[^@]+}}@level2all_late
-; TUNIT-SAME: (ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: store i32 1, ptr addrspace(3) @UnreachableNonKernel, align 4
+; TUNIT-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[ADDR]] to ptr addrspace(5)
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
; CGSCC-LABEL: define {{[^@]+}}@level2all_late
-; CGSCC-SAME: (ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[ADDR:%.*]]) #[[ATTR2]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: store i32 1, ptr addrspace(3) @UnreachableNonKernel, align 4
; CGSCC-NEXT: store i32 5, ptr [[ADDR]], align 4
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll b/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll
index 8b4dbc9..1d6e54c 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll
@@ -86,7 +86,7 @@ define i8 @test4(i8 %a, i8 %b) {
; CHECK-NEXT: br i1 [[CMP]], label [[BB:%.*]], label [[EXIT:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i8 [[A:%.*]], [[B]]
-; CHECK-NEXT: ret i8 -1
+; CHECK-NEXT: ret i8 [[SHL]]
; CHECK: exit:
; CHECK-NEXT: ret i8 0
;
@@ -105,7 +105,7 @@ exit:
define i8 @test5(i8 %b) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i8 0, [[B:%.*]]
-; CHECK-NEXT: ret i8 [[SHL]]
+; CHECK-NEXT: ret i8 0
;
%shl = shl i8 0, %b
ret i8 %shl
@@ -474,3 +474,17 @@ define i1 @shl_nuw_nsw_test4(i32 %x, i32 range(i32 0, 32) %k) {
%cmp = icmp eq i64 %shl, -9223372036854775808
ret i1 %cmp
}
+
+define i1 @shl_nuw_nsw_test5(i32 %x) {
+; CHECK-LABEL: @shl_nuw_nsw_test5(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i32 768, [[X:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHL]], 1846
+; CHECK-NEXT: ret i1 true
+;
+entry:
+ %shl = shl nuw nsw i32 768, %x
+ %add = add nuw i32 %shl, 1846
+ %cmp = icmp sgt i32 %add, 0
+ ret i1 %cmp
+}
diff --git a/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll b/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
index eeea145..6fa5beb 100644
--- a/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
+++ b/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
@@ -7,7 +7,7 @@ define i32 @foo(<vscale x 2 x i32> %x) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: #dbg_value(<vscale x 2 x i32> undef, [[META8:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
+; CHECK-NEXT: #dbg_value(<vscale x 2 x i32> poison, [[META8:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
; CHECK-NEXT: store <vscale x 2 x i32> [[X:%.*]], ptr [[ARR]], align 4
; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[ARR]], align 4
; CHECK-NEXT: ret i32 [[RES]]
diff --git a/llvm/test/Transforms/InstCombine/debuginfo.ll b/llvm/test/Transforms/InstCombine/debuginfo.ll
index 73e9252..ee2e2c2 100644
--- a/llvm/test/Transforms/InstCombine/debuginfo.ll
+++ b/llvm/test/Transforms/InstCombine/debuginfo.ll
@@ -74,10 +74,10 @@ entry:
; NOLOWER-NOT: store
; NOLOWER-NOT: #dbg_declare
; Here we want to find: call void @llvm.dbg.value(metadata i64 %o.coerce0, metadata [[VARIABLE_O]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64))
-; NOLOWER: #dbg_value(i64 undef, {{.*}})
+; NOLOWER: #dbg_value(i64 poison, {{.*}})
; NOLOWER-NOT: store
; Here we want to find: call void @llvm.dbg.value(metadata i64 %o.coerce1, metadata [[VARIABLE_O]], metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64))
-; NOLOWER: #dbg_value(i64 undef, {{.*}})
+; NOLOWER: #dbg_value(i64 poison, {{.*}})
; NOLOWER-NOT: store
; NOLOWER: call void @tworegs_callee(i64 %o.coerce0, i64 %o.coerce1)
diff --git a/llvm/test/Transforms/InstCombine/select-icmp-and.ll b/llvm/test/Transforms/InstCombine/select-icmp-and.ll
index 8bedf69..a57a7c5 100644
--- a/llvm/test/Transforms/InstCombine/select-icmp-and.ll
+++ b/llvm/test/Transforms/InstCombine/select-icmp-and.ll
@@ -629,3 +629,89 @@ define i8 @set_to_clear_decomposebittest_extra_use(i8 %x) {
ret i8 %t3
}
+define i32 @select_bittest_to_add(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_add(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT: [[RET:%.*]] = add nuw nsw i32 [[AND]], 3
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %and = and i32 %x, 1
+ %cmp = icmp eq i32 %and, 0
+ %ret = select i1 %cmp, i32 3, i32 4
+ ret i32 %ret
+}
+
+define i32 @select_bittest_to_sub(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_sub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT: [[RET:%.*]] = sub nuw nsw i32 4, [[AND]]
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %and = and i32 %x, 1
+ %cmp = icmp eq i32 %and, 0
+ %ret = select i1 %cmp, i32 4, i32 3
+ ret i32 %ret
+}
+
+define i32 @select_bittest_to_shl(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_shl(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[CMP]], i32 2, i32 4
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %and = and i32 %x, 1
+ %cmp = icmp eq i32 %and, 0
+ %ret = select i1 %cmp, i32 2, i32 4
+ ret i32 %ret
+}
+
+define i32 @select_bittest_to_lshr(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_lshr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[CMP]], i32 4, i32 2
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %and = and i32 %x, 1
+ %cmp = icmp eq i32 %and, 0
+ %ret = select i1 %cmp, i32 4, i32 2
+ ret i32 %ret
+}
+
+define i32 @select_bittest_to_ashr(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_ashr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 2
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[CMP]], i32 -4, i32 -1
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %and = and i32 %x, 2
+ %cmp = icmp eq i32 %and, 0
+ %ret = select i1 %cmp, i32 -4, i32 -1
+ ret i32 %ret
+}
+
+define i32 @select_bittest_to_shl_negative_test(i32 %x) {
+; CHECK-LABEL: @select_bittest_to_shl_negative_test(
+; CHECK-NEXT: [[MASK:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[MASK]], 0
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i32 4, i32 6
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %mask = and i32 %x, 1
+ %cond = icmp eq i32 %mask, 0
+ %y = select i1 %cond, i32 2, i32 4
+ %res = add nuw nsw i32 %y, 2
+ ret i32 %res
+}
diff --git a/llvm/test/Transforms/InstSimplify/insertelement.ll b/llvm/test/Transforms/InstSimplify/insertelement.ll
index 55fab36..3fe8b83 100644
--- a/llvm/test/Transforms/InstSimplify/insertelement.ll
+++ b/llvm/test/Transforms/InstSimplify/insertelement.ll
@@ -119,3 +119,11 @@ unreachable_infloop:
%bogus = insertelement <2 x i64> %bogus, i64 undef, i32 1
br label %unreachable_infloop
}
+
+define <4 x i32> @insert_into_splat(i32 %index) {
+; CHECK-LABEL: @insert_into_splat(
+; CHECK-NEXT: ret <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+;
+ %I = insertelement <4 x i32> <i32 3, i32 3, i32 3, i32 3>, i32 3, i32 %index
+ ret <4 x i32> %I
+}
diff --git a/llvm/test/Transforms/InstSimplify/select-icmp.ll b/llvm/test/Transforms/InstSimplify/select-icmp.ll
new file mode 100755
index 0000000..a6ef937
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/select-icmp.ll
@@ -0,0 +1,246 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+
+; TODO: https://alive2.llvm.org/ce/z/3ybZRl
+define i32 @pr54735_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_slt(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], 1
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[SUB]], -1
+; CHECK-NEXT: [[ABSCOND:%.*]] = icmp sle i32 [[SUB]], -1
+; CHECK-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i32 [[NEG]], i32 [[ADD]]
+; CHECK-NEXT: ret i32 [[ABS]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp slt i32 %x, %y ; x<y ? abs (x-y+1): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, 1
+ %neg = xor i32 %sub, -1 ; sub nsw i32 0, %add
+ %abscond = icmp sle i32 %sub, -1
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; https://alive2.llvm.org/ce/z/fTTsdT
+define i32 @pr54735_sgt(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_sgt(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], 1
+; CHECK-NEXT: ret i32 [[ADD]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp sgt i32 %x, %y ; x>y ? abs (x-y+1): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, 1
+ %neg = xor i32 %sub, -1 ; sub nsw i32 0, %add
+ %abscond = icmp slt i32 %sub, -1
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; https://alive2.llvm.org/ce/z/k9v75c
+define i32 @pr54735_sge(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_sge(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], 1
+; CHECK-NEXT: ret i32 [[ADD]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp sge i32 %x, %y ; x>y ? abs (x-y+1): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, 1
+ %neg = xor i32 %sub, -1 ; sub nsw i32 0, %add
+ %abscond = icmp slt i32 %sub, -1
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; Negative test: https://alive2.llvm.org/ce/z/oZyu4M
+define i8 @pr54735_without_nsw (i8 %x, i8 %y) {
+; CHECK-LABEL: @pr54735_without_nsw(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub i8 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SUB]], 1
+; CHECK-NEXT: [[NEG:%.*]] = xor i8 [[SUB]], -1
+; CHECK-NEXT: [[ABSCOND:%.*]] = icmp slt i8 [[SUB]], -1
+; CHECK-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i8 [[NEG]], i8 [[ADD]]
+; CHECK-NEXT: ret i8 [[ABS]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i8 0
+;
+entry:
+ %cmp = icmp sgt i8 %x, %y
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub i8 %x, %y
+ %add = add i8 %sub, 1
+ %neg = xor i8 %sub, -1
+ %abscond = icmp slt i8 %sub, -1
+ %abs = select i1 %abscond, i8 %neg, i8 %add
+ ret i8 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i8 0
+}
+
+define i32 @pr54735_sle(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_sle(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], 1
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[SUB]], -1
+; CHECK-NEXT: [[ABSCOND:%.*]] = icmp slt i32 [[SUB]], -1
+; CHECK-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i32 [[NEG]], i32 [[ADD]]
+; CHECK-NEXT: ret i32 [[ABS]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp sle i32 %x, %y ; x<=y ? abs (x-y+1): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, 1
+ %neg = xor i32 %sub, -1 ; sub nsw i32 0, %add
+ %abscond = icmp slt i32 %sub, -1
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; https://alive2.llvm.org/ce/z/pp9zJi
+define i32 @pr54735_slt_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_slt_neg(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[SUB]], 12
+; CHECK-NEXT: ret i32 [[NEG]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp slt i32 %x, %y ; x<y ? abs (x-y-12): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, -12 ; %sub - 12
+ %neg = xor i32 %sub, 12 ; 12 - %sub
+ %abscond = icmp sle i32 %sub, 12
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; https://alive2.llvm.org/ce/z/9P6grR
+define i32 @pr54735_sle_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: @pr54735_sle_neg(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X]], [[Y]]
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[SUB]], 12
+; CHECK-NEXT: ret i32 [[NEG]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %cmp = icmp sle i32 %x, %y ; x<=y ? abs (x-y-12): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub nsw i32 %x, %y
+ %add = add nsw i32 %sub, -12
+ %neg = xor i32 %sub, 12 ; %sub - 12
+ %abscond = icmp sle i32 %sub, 12
+ %abs = select i1 %abscond, i32 %neg, i32 %add
+ ret i32 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i32 0
+}
+
+; Negative test: https://alive2.llvm.org/ce/z/Yqv4x2
+define i8 @pr54735_unexpect_const (i8 %x, i8 %y) {
+; CHECK-LABEL: @pr54735_unexpect_const(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
+; CHECK: cond.true:
+; CHECK-NEXT: [[SUB:%.*]] = sub i8 [[X]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SUB]], 2
+; CHECK-NEXT: [[NEG:%.*]] = xor i8 [[SUB]], -1
+; CHECK-NEXT: [[ABSCOND:%.*]] = icmp slt i8 [[SUB]], -2
+; CHECK-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i8 [[NEG]], i8 [[ADD]]
+; CHECK-NEXT: ret i8 [[ABS]]
+; CHECK: cond.end:
+; CHECK-NEXT: ret i8 0
+;
+entry:
+ %cmp = icmp sgt i8 %x, %y ; x>y ? abs (x-y+2): 0
+ br i1 %cmp, label %cond.true, label %cond.end
+
+cond.true: ; preds = %entry
+ %sub = sub i8 %x, %y
+ %add = add i8 %sub, 2 ; x-y+2
+ %neg = xor i8 %sub, -1 ; y-x-1
+ %neg1 = sub i8 %neg, 1 ; y-x-2
+ %abscond = icmp slt i8 %sub, -2
+ %abs = select i1 %abscond, i8 %neg, i8 %add
+ ret i8 %abs
+
+cond.end: ; preds = %entry, %cond.true
+ ret i8 0
+}
diff --git a/llvm/test/Transforms/LICM/hoist-binop.ll b/llvm/test/Transforms/LICM/hoist-binop.ll
index 8bda74e..7a309ea 100644
--- a/llvm/test/Transforms/LICM/hoist-binop.ll
+++ b/llvm/test/Transforms/LICM/hoist-binop.ll
@@ -189,6 +189,35 @@ loop:
br label %loop
}
+; Don't fold if the intermediate op has more than two uses. This is an
+; heuristic that can be adjusted if warranted. Currently we are being
+; conservative to minimise potential impact in code size.
+define void @not_many_uses(i64 %c1, i64 %c2, i64 %c3) {
+; CHECK-LABEL: @not_many_uses(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add i64 [[INDEX]], [[C1:%.*]]
+; CHECK-NEXT: call void @use(i64 [[STEP_ADD]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[STEP_ADD]], [[C2:%.*]]
+; CHECK-NEXT: [[OTHER:%.*]] = add i64 [[STEP_ADD]], [[C3:%.*]]
+; CHECK-NEXT: call void @use(i64 [[OTHER]])
+; CHECK-NEXT: br label [[LOOP]]
+;
+entry:
+ br label %loop
+
+loop:
+ %index = phi i64 [ 0, %entry ], [ %index.next, %loop ]
+ %step.add = add i64 %index, %c1
+ call void @use(i64 %step.add)
+ %index.next = add i64 %step.add, %c2
+ %other = add i64 %step.add, %c3
+ call void @use(i64 %other)
+ br label %loop
+}
+
; Original reproducer, adapted from:
; for(long i = 0; i < n; ++i)
; a[i] = (i*k) * v;
diff --git a/llvm/test/Transforms/LoopIdiom/basic.ll b/llvm/test/Transforms/LoopIdiom/basic.ll
index 87247aa..912b154 100644
--- a/llvm/test/Transforms/LoopIdiom/basic.ll
+++ b/llvm/test/Transforms/LoopIdiom/basic.ll
@@ -1,15 +1,15 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt -passes=loop-idiom < %s -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-; For @test11_pattern
-; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 1, i32 1, i32 1, i32 1]
-
-; For @test13_pattern
-; CHECK: @.memset_pattern.1 = private unnamed_addr constant [2 x ptr] [ptr @G, ptr @G]
-
target triple = "x86_64-apple-darwin10.0.0"
+;.
+; CHECK: @G = global i32 5
+; CHECK: @g_50 = global [7 x i32] [i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0], align 16
+; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 1, i32 1, i32 1, i32 1], align 16
+; CHECK: @.memset_pattern.1 = private unnamed_addr constant [2 x ptr] [ptr @G, ptr @G], align 16
+;.
define void @test1(ptr %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test1(
; CHECK-NEXT: bb.nph:
@@ -1620,3 +1620,11 @@ define noalias ptr @_ZN8CMSPULog9beginImplEja(ptr nocapture writeonly %0) local_
}
; Validate that "memset_pattern" has the proper attributes.
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind ssp }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nounwind }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { nofree nounwind willreturn memory(argmem: readwrite) }
+; CHECK: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
index 0f26092..c999048 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
@@ -34,21 +34,21 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX6:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[OFFSET_IDX6]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i16>, ptr [[TMP5]], align 2
-; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD]], <8 x i16> [[BROADCAST_SPLAT]])
-; CHECK-NEXT: [[TMP7:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD8]], <8 x i16> [[BROADCAST_SPLAT]])
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP6]], i64 16
-; CHECK-NEXT: store <8 x i16> [[TMP6]], ptr [[NEXT_GEP6]], align 2
-; CHECK-NEXT: store <8 x i16> [[TMP7]], ptr [[TMP8]], align 2
+; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
+; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD]], <8 x i16> [[BROADCAST_SPLAT]])
+; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD9]], <8 x i16> [[BROADCAST_SPLAT]])
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP7]], i64 16
+; CHECK-NEXT: store <8 x i16> [[TMP4]], ptr [[NEXT_GEP7]], align 2
+; CHECK-NEXT: store <8 x i16> [[TMP5]], ptr [[TMP6]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
@@ -62,10 +62,10 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca
; CHECK-NEXT: [[PSRC_ADDR_08:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[PDST_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR3:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_08]], i64 2
-; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[PSRC_ADDR_08]], align 2
-; CHECK-NEXT: [[TMP11:%.*]] = tail call i16 @llvm.sadd.sat.i16(i16 [[TMP10]], i16 [[OFFSET]])
+; CHECK-NEXT: [[TMP8:%.*]] = load i16, ptr [[PSRC_ADDR_08]], align 2
+; CHECK-NEXT: [[TMP9:%.*]] = tail call i16 @llvm.sadd.sat.i16(i16 [[TMP8]], i16 [[OFFSET]])
; CHECK-NEXT: [[INCDEC_PTR3]] = getelementptr inbounds i8, ptr [[PDST_ADDR_07]], i64 2
-; CHECK-NEXT: store i16 [[TMP11]], ptr [[PDST_ADDR_07]], align 2
+; CHECK-NEXT: store i16 [[TMP9]], ptr [[PDST_ADDR_07]], align 2
; CHECK-NEXT: [[DEC]] = add i32 [[BLKCNT_09]], -1
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
@@ -136,45 +136,45 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[IND_END18:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]]
-; CHECK-NEXT: [[IND_END15:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]]
-; CHECK-NEXT: [[DOTCAST11:%.*]] = trunc nuw i64 [[N_VEC]] to i32
-; CHECK-NEXT: [[IND_END12:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST11]]
+; CHECK-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]]
+; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]]
+; CHECK-NEXT: [[DOTCAST8:%.*]] = trunc nuw i64 [[N_VEC]] to i32
+; CHECK-NEXT: [[IND_END9:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST8]]
; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP0]], 24
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[N_VEC9:%.*]] = and i64 [[TMP0]], 4294967288
-; CHECK-NEXT: [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC9]] to i32
-; CHECK-NEXT: [[IND_END10:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
-; CHECK-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC9]]
-; CHECK-NEXT: [[IND_END17:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC9]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT25:%.*]] = insertelement <8 x i8> poison, i8 [[OFFSET]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT26:%.*]] = shufflevector <8 x i8> [[BROADCAST_SPLATINSERT25]], <8 x i8> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[N_VEC7:%.*]] = and i64 [[TMP0]], 4294967288
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC7]] to i32
+; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
+; CHECK-NEXT: [[IND_END10:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC7]]
+; CHECK-NEXT: [[IND_END13:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT20:%.*]] = insertelement <8 x i8> poison, i8 [[OFFSET]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT21:%.*]] = shufflevector <8 x i8> [[BROADCAST_SPLATINSERT20]], <8 x i8> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX21:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT27:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[NEXT_GEP22:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[INDEX21]]
-; CHECK-NEXT: [[NEXT_GEP23:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[INDEX21]]
-; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <8 x i8>, ptr [[NEXT_GEP22]], align 2
-; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.umin.v8i8(<8 x i8> [[WIDE_LOAD24]], <8 x i8> [[BROADCAST_SPLAT26]])
-; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[NEXT_GEP23]], align 2
-; CHECK-NEXT: [[INDEX_NEXT27]] = add nuw i64 [[INDEX21]], 8
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT27]], [[N_VEC9]]
+; CHECK-NEXT: [[INDEX16:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT22:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[NEXT_GEP17:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[INDEX16]]
+; CHECK-NEXT: [[NEXT_GEP18:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[INDEX16]]
+; CHECK-NEXT: [[WIDE_LOAD19:%.*]] = load <8 x i8>, ptr [[NEXT_GEP17]], align 2
+; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.umin.v8i8(<8 x i8> [[WIDE_LOAD19]], <8 x i8> [[BROADCAST_SPLAT21]])
+; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[NEXT_GEP18]], align 2
+; CHECK-NEXT: [[INDEX_NEXT22]] = add nuw i64 [[INDEX16]], 8
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT22]], [[N_VEC7]]
; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: [[CMP_N20:%.*]] = icmp eq i64 [[N_VEC9]], [[TMP0]]
-; CHECK-NEXT: br i1 [[CMP_N20]], label [[WHILE_END]], label [[VEC_EPILOG_SCALAR_PH]]
+; CHECK-NEXT: [[CMP_N23:%.*]] = icmp eq i64 [[N_VEC7]], [[TMP0]]
+; CHECK-NEXT: br i1 [[CMP_N23]], label [[WHILE_END]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[IND_END10]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END12]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[BLOCKSIZE]], [[ITER_CHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL16:%.*]] = phi ptr [ [[IND_END14]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END15]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PSRC]], [[ITER_CHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL19:%.*]] = phi ptr [ [[IND_END17]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END18]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PDST]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END9]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[BLOCKSIZE]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL12:%.*]] = phi ptr [ [[IND_END10]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END11]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PSRC]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL15:%.*]] = phi ptr [ [[IND_END13]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END14]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PDST]], [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[BLKCNT_09:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL13]], [[VEC_EPILOG_SCALAR_PH]] ]
-; CHECK-NEXT: [[PSRC_ADDR_08:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL16]], [[VEC_EPILOG_SCALAR_PH]] ]
-; CHECK-NEXT: [[PDST_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR3:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL19]], [[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[BLKCNT_09:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[PSRC_ADDR_08:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL12]], [[VEC_EPILOG_SCALAR_PH]] ]
+; CHECK-NEXT: [[PDST_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR3:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL15]], [[VEC_EPILOG_SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_08]], i64 1
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[PSRC_ADDR_08]], align 2
; CHECK-NEXT: [[TMP9:%.*]] = tail call i8 @llvm.umin.i8(i8 [[TMP8]], i8 [[OFFSET]])
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index c0bd7bb..45674ac 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -5595,11 +5595,11 @@ define i64 @trunc_with_first_order_recurrence() {
; CHECK-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
; CHECK-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
-; CHECK-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
+; CHECK-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
; CHECK-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
; CHECK-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
; CHECK-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5657,11 +5657,11 @@ define i64 @trunc_with_first_order_recurrence() {
; IND-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; IND-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
; IND-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
-; IND-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
+; IND-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; IND-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
; IND-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
; IND-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
-; IND-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
+; IND-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
; IND-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
; IND-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
; IND-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5735,11 +5735,11 @@ define i64 @trunc_with_first_order_recurrence() {
; UNROLL-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
; UNROLL-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
-; UNROLL-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
+; UNROLL-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
; UNROLL-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
; UNROLL-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
-; UNROLL-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
+; UNROLL-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
; UNROLL-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
; UNROLL-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
; UNROLL-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5815,11 +5815,11 @@ define i64 @trunc_with_first_order_recurrence() {
; UNROLL-NO-IC-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
; UNROLL-NO-IC-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
; UNROLL-NO-IC-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
-; UNROLL-NO-IC-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
+; UNROLL-NO-IC-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
; UNROLL-NO-IC-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
; UNROLL-NO-IC-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
; UNROLL-NO-IC-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5893,11 +5893,11 @@ define i64 @trunc_with_first_order_recurrence() {
; INTERLEAVE-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
-; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
+; INTERLEAVE-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
; INTERLEAVE-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
; INTERLEAVE-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
-; INTERLEAVE-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
+; INTERLEAVE-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
; INTERLEAVE-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
; INTERLEAVE-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
; INTERLEAVE-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5980,9 +5980,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
; CHECK-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6107,9 +6107,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NO-IC: loop:
; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
+; UNROLL-NO-IC-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
-; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
+; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
; UNROLL-NO-IC-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; UNROLL-NO-IC-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6159,9 +6159,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; INTERLEAVE: loop:
; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 96, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 96, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
-; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
+; INTERLEAVE-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
-; INTERLEAVE-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
+; INTERLEAVE-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
; INTERLEAVE-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; INTERLEAVE-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6265,13 +6265,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; CHECK-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
-; CHECK-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
+; CHECK-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6336,13 +6336,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; IND-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; IND-NEXT: br label [[LOOP:%.*]]
; IND: loop:
-; IND-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
+; IND-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
; IND-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
; IND-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; IND-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; IND-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
; IND-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
-; IND-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
+; IND-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; IND-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; IND-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; IND-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6411,13 +6411,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; UNROLL-NEXT: br label [[LOOP:%.*]]
; UNROLL: loop:
-; UNROLL-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
+; UNROLL-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
; UNROLL-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; UNROLL-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
; UNROLL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
-; UNROLL-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
+; UNROLL-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; UNROLL-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; UNROLL-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; UNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6494,13 +6494,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY]] ]
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
; UNROLL-NO-IC: loop:
-; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
+; UNROLL-NO-IC-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; UNROLL-NO-IC-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
; UNROLL-NO-IC-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
-; UNROLL-NO-IC-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
+; UNROLL-NO-IC-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; UNROLL-NO-IC-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; UNROLL-NO-IC-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; UNROLL-NO-IC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6569,13 +6569,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; INTERLEAVE-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; INTERLEAVE-NEXT: br label [[LOOP:%.*]]
; INTERLEAVE: loop:
-; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
+; INTERLEAVE-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
; INTERLEAVE-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
; INTERLEAVE-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
-; INTERLEAVE-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
+; INTERLEAVE-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 2e8e14e..61ed955e 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -1520,10 +1520,10 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ [[DOTPRE]], [[ENTRY]] ], [ [[DOTPRE]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LOAD2:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LOAD2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[I1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[SCALAR_RECUR]] to i32
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
; CHECK-NEXT: [[I1]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[IV1:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-NEXT: [[IV2]] = add nuw nsw i64 [[IV]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll b/llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
index fc6dcc3..66a0c5f 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
@@ -67,16 +67,16 @@ define void @Test(ptr nocapture %obj, i64 %z) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[OBJ]], i64 0, i32 0, i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP12]], align 4, !alias.scope !0
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope !3
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP12]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3:![0-9]+]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP13]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4, !alias.scope !5, !noalias !7
+; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[TMP14]], [[WIDE_LOAD8]]
-; CHECK-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP16]], align 4, !alias.scope !5, !noalias !7
+; CHECK-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP16]], align 4, !alias.scope [[META5]], !noalias [[META7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -134,16 +134,16 @@ define void @Test(ptr nocapture %obj, i64 %z) #0 {
; CHECK-HOIST-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
; CHECK-HOIST-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[OBJ]], i64 0, i32 0, i64 [[TMP4]]
; CHECK-HOIST-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 0
-; CHECK-HOIST-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope !0
-; CHECK-HOIST-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP3]], align 4, !alias.scope !3
+; CHECK-HOIST-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-HOIST-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP3]], align 4, !alias.scope [[META3:![0-9]+]]
; CHECK-HOIST-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0
; CHECK-HOIST-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-HOIST-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
; CHECK-HOIST-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[TMP4]]
; CHECK-HOIST-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0
-; CHECK-HOIST-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4, !alias.scope !5, !noalias !7
+; CHECK-HOIST-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-HOIST-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[TMP8]], [[WIDE_LOAD5]]
-; CHECK-HOIST-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP10]], align 4, !alias.scope !5, !noalias !7
+; CHECK-HOIST-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP10]], align 4, !alias.scope [[META5]], !noalias [[META7]]
; CHECK-HOIST-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-HOIST-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-HOIST-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/Mem2Reg/dbg_declare_to_value_conversions.ll b/llvm/test/Transforms/Mem2Reg/dbg_declare_to_value_conversions.ll
index 721405b..132d290 100644
--- a/llvm/test/Transforms/Mem2Reg/dbg_declare_to_value_conversions.ll
+++ b/llvm/test/Transforms/Mem2Reg/dbg_declare_to_value_conversions.ll
@@ -33,7 +33,7 @@ define i32 @foo2(ptr %arg) {
store ptr %arg, ptr %arg.indirect_addr
call void @llvm.dbg.declare(metadata ptr %arg.indirect_addr, metadata !25, metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 2)), !dbg !40
; CHECK-LABEL: @foo2
- ; CHECK-NEXT: #dbg_value(ptr undef, {{.*}}, !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 2),
+ ; CHECK-NEXT: #dbg_value(ptr poison, {{.*}}, !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 2),
%val = load i32, ptr %arg
ret i32 %val
}
diff --git a/llvm/test/Transforms/Mem2Reg/debug-alloca-vla-2.ll b/llvm/test/Transforms/Mem2Reg/debug-alloca-vla-2.ll
index a17fb26..0d94e9a 100644
--- a/llvm/test/Transforms/Mem2Reg/debug-alloca-vla-2.ll
+++ b/llvm/test/Transforms/Mem2Reg/debug-alloca-vla-2.ll
@@ -25,7 +25,7 @@ define void @scan(i32 %n) #0 !dbg !4 {
; CHECK: for.cond:
; CHECK-NEXT: [[VLA1_0:%.*]] = phi i32 [ undef, [[ENTRY:%.*]] ], [ [[T0:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[T0]] = add i32 [[VLA1_0]], 1
-; CHECK-NEXT: #dbg_value(i32 undef, [[META11:![0-9]+]], !DIExpression(), [[META19:![0-9]+]])
+; CHECK-NEXT: #dbg_value(i32 poison, [[META11:![0-9]+]], !DIExpression(), [[META19:![0-9]+]])
; CHECK-NEXT: br label [[FOR_COND]], !dbg [[DBG10]]
;
entry:
diff --git a/llvm/test/Transforms/OpenMP/barrier_removal.ll b/llvm/test/Transforms/OpenMP/barrier_removal.ll
index 5bfdb6f..0925418 100644
--- a/llvm/test/Transforms/OpenMP/barrier_removal.ll
+++ b/llvm/test/Transforms/OpenMP/barrier_removal.ll
@@ -332,28 +332,28 @@ define void @pos_priv_mem() "kernel" {
; CHECK-LABEL: define {{[^@]+}}@pos_priv_mem
; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: [[ARG:%.*]] = load ptr addrspace(5), ptr @GPtr5, align 4
-; CHECK-NEXT: [[LOC:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[LOC:%.*]] = alloca i32, align 4, addrspace(5)
; CHECK-NEXT: [[A:%.*]] = load i32, ptr @PG1, align 4
-; CHECK-NEXT: store i32 [[A]], ptr [[LOC]], align 4
+; CHECK-NEXT: store i32 [[A]], ptr addrspace(5) [[LOC]], align 4
; CHECK-NEXT: [[B:%.*]] = load i32, ptr addrspacecast (ptr addrspace(5) @PG2 to ptr), align 4
; CHECK-NEXT: [[ARGC:%.*]] = addrspacecast ptr addrspace(5) [[ARG]] to ptr
; CHECK-NEXT: store i32 [[B]], ptr [[ARGC]], align 4
-; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[LOC]], align 4
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr addrspace(5) [[LOC]], align 4
; CHECK-NEXT: store i32 [[V]], ptr @PG1, align 4
; CHECK-NEXT: ret void
;
%arg = load ptr addrspace(5), ptr @GPtr5
- %loc = alloca i32
+ %loc = alloca i32, addrspace(5)
%a = load i32, ptr @PG1
call void @aligned_barrier()
- store i32 %a, ptr %loc
+ store i32 %a, ptr addrspace(5) %loc
%PG2c = addrspacecast ptr addrspace(5) @PG2 to ptr
%b = load i32, ptr %PG2c
call void @aligned_barrier()
%argc = addrspacecast ptr addrspace(5) %arg to ptr
store i32 %b, ptr %argc
call void @aligned_barrier()
- %v = load i32, ptr %loc
+ %v = load i32, ptr addrspace(5) %loc
store i32 %v, ptr @PG1
call void @aligned_barrier()
ret void
diff --git a/llvm/test/Transforms/PhaseOrdering/varargs.ll b/llvm/test/Transforms/PhaseOrdering/varargs.ll
index c66a982..70c63a8 100644
--- a/llvm/test/Transforms/PhaseOrdering/varargs.ll
+++ b/llvm/test/Transforms/PhaseOrdering/varargs.ll
@@ -20,7 +20,8 @@ entry:
define internal i32 @vararg(i32 %first, ...) {
entry:
- %vlist = alloca ptr, align 8
+ %vlist.alloca = alloca ptr, align 8, addrspace(5)
+ %vlist = addrspacecast ptr addrspace(5) %vlist.alloca to ptr
call void @llvm.va_start.p0(ptr %vlist)
%vlist.promoted = load ptr, ptr %vlist, align 8
%argp.a = getelementptr inbounds i8, ptr %vlist.promoted, i64 4
diff --git a/llvm/test/CodeGen/Generic/expand-vp-fp-intrinsics.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-fp-intrinsics.ll
index bc89ddea..d0b6fe2 100644
--- a/llvm/test/CodeGen/Generic/expand-vp-fp-intrinsics.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-fp-intrinsics.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -expandvp -S < %s | FileCheck %s
+; RUN: opt -passes=pre-isel-intrinsic-lowering -S < %s | FileCheck %s
define void @vp_fadd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; CHECK-LABEL: define void @vp_fadd_v4f32(
diff --git a/llvm/test/CodeGen/Generic/expand-vp-gather-scatter.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-gather-scatter.ll
index 2e2dba5..23096ec 100644
--- a/llvm/test/CodeGen/Generic/expand-vp-gather-scatter.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-gather-scatter.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt --expandvp -S < %s | FileCheck %s
+; RUN: opt -passes=pre-isel-intrinsic-lowering -S < %s | FileCheck %s
; Fixed vectors
define <4 x i32> @vpgather_v4i32(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/Generic/expand-vp-load-store.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-load-store.ll
index 5c6f1e8..c966c3e 100644
--- a/llvm/test/CodeGen/Generic/expand-vp-load-store.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp-load-store.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt --expandvp -S < %s | FileCheck %s
-; RUN: opt --expandvp --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s
+; RUN: opt -passes=pre-isel-intrinsic-lowering -S < %s | FileCheck %s
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s
; Fixed vectors
define <2 x i64> @vpload_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/Generic/expand-vp.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp.ll
index 4fee9a5..92b3605 100644
--- a/llvm/test/CodeGen/Generic/expand-vp.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/expand-vp.ll
@@ -1,12 +1,12 @@
; Partial expansion cases (still VP with parameter expansions).
-; RUN: opt --expandvp --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=LEGAL_LEGAL
-; RUN: opt --expandvp --expandvp-override-evl-transform=Discard --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=DISCARD_LEGAL
-; RUN: opt --expandvp --expandvp-override-evl-transform=Convert --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=CONVERT_LEGAL
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=LEGAL_LEGAL
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Discard --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=DISCARD_LEGAL
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Convert --expandvp-override-mask-transform=Legal -S < %s | FileCheck %s --check-prefix=CONVERT_LEGAL
; Full expansion cases (all expanded to non-VP).
-; RUN: opt --expandvp --expandvp-override-evl-transform=Discard --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
-; RUN: opt --expandvp -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
-; RUN: opt --expandvp --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
-; RUN: opt --expandvp --expandvp-override-evl-transform=Convert --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Discard --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
+; RUN: opt -passes=pre-isel-intrinsic-lowering -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Legal --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
+; RUN: opt -passes=pre-isel-intrinsic-lowering --expandvp-override-evl-transform=Convert --expandvp-override-mask-transform=Convert -S < %s | FileCheck %s --check-prefix=ALL-CONVERT
; Fixed-width vectors
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
index 3595f77..4978991 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
@@ -8,17 +8,12 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
; CHECK-NEXT: [[CONV1:%.*]] = zext i8 [[TMP0]] to i32
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX1]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX2]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x ptr> [[TMP4]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 4, i64 6>
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 2
+; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
+; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
+; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; CHECK-NEXT: [[CONV33:%.*]] = zext i8 [[TMP10]] to i32
@@ -26,10 +21,12 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
; CHECK-NEXT: [[CONV_1:%.*]] = zext i8 [[TMP11]] to i32
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ADD_PTR644]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; CHECK-NEXT: [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
-; CHECK-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX22_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 1
+; CHECK-NEXT: [[ARRAYIDX25_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 5
+; CHECK-NEXT: [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 5
; CHECK-NEXT: [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; CHECK-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP14]] to i32
@@ -37,178 +34,189 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
-; CHECK-NEXT: [[TMP15:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
-; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32>
-; CHECK-NEXT: [[TMP17:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
-; CHECK-NEXT: [[TMP18:%.*]] = zext <2 x i8> [[TMP17]] to <2 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP16]], [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
-; CHECK-NEXT: [[TMP21:%.*]] = zext <2 x i8> [[TMP20]] to <2 x i32>
-; CHECK-NEXT: [[TMP22:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
-; CHECK-NEXT: [[TMP23:%.*]] = zext <2 x i8> [[TMP22]] to <2 x i32>
-; CHECK-NEXT: [[TMP24:%.*]] = sub <2 x i32> [[TMP21]], [[TMP23]]
+; CHECK-NEXT: [[ARRAYIDX8_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 1
+; CHECK-NEXT: [[ARRAYIDX10_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 1
+; CHECK-NEXT: [[ARRAYIDX13_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 5
+; CHECK-NEXT: [[ARRAYIDX15_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 5
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP4]] to <2 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR64_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP7:%.*]] = zext <2 x i8> [[TMP6]] to <2 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i32> [[TMP16]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP13:%.*]] = zext <2 x i8> [[TMP9]] to <2 x i32>
+; CHECK-NEXT: [[TMP28:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP12:%.*]] = zext <2 x i8> [[TMP28]] to <2 x i32>
+; CHECK-NEXT: [[TMP24:%.*]] = sub <2 x i32> [[TMP13]], [[TMP12]]
; CHECK-NEXT: [[TMP25:%.*]] = shl <2 x i32> [[TMP24]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP26:%.*]] = add <2 x i32> [[TMP25]], [[TMP19]]
-; CHECK-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 2
-; CHECK-NEXT: [[ARRAYIDX22_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 2
-; CHECK-NEXT: [[ARRAYIDX25_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 6
-; CHECK-NEXT: [[ARRAYIDX27_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 6
-; CHECK-NEXT: [[TMP27:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_2]], align 1
-; CHECK-NEXT: [[TMP28:%.*]] = zext <2 x i8> [[TMP27]] to <2 x i32>
-; CHECK-NEXT: [[TMP29:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_2]], align 1
-; CHECK-NEXT: [[TMP30:%.*]] = zext <2 x i8> [[TMP29]] to <2 x i32>
-; CHECK-NEXT: [[TMP31:%.*]] = sub <2 x i32> [[TMP28]], [[TMP30]]
-; CHECK-NEXT: [[TMP32:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25_2]], align 1
-; CHECK-NEXT: [[TMP33:%.*]] = zext <2 x i8> [[TMP32]] to <2 x i32>
-; CHECK-NEXT: [[TMP34:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_2]], align 1
-; CHECK-NEXT: [[TMP35:%.*]] = zext <2 x i8> [[TMP34]] to <2 x i32>
-; CHECK-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP33]], [[TMP35]]
+; CHECK-NEXT: [[TMP15:%.*]] = add <2 x i32> [[TMP25]], [[TMP8]]
+; CHECK-NEXT: [[TMP29:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP17:%.*]] = zext <2 x i8> [[TMP29]] to <2 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP19:%.*]] = zext <2 x i8> [[TMP18]] to <2 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = sub <2 x i32> [[TMP17]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX13_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP22:%.*]] = zext <2 x i8> [[TMP21]] to <2 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP30:%.*]] = zext <2 x i8> [[TMP23]] to <2 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP22]], [[TMP30]]
; CHECK-NEXT: [[TMP37:%.*]] = shl <2 x i32> [[TMP36]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP37]], [[TMP31]]
+; CHECK-NEXT: [[TMP27:%.*]] = add <2 x i32> [[TMP37]], [[TMP20]]
+; CHECK-NEXT: [[TMP26:%.*]] = add <2 x i32> [[TMP27]], [[TMP15]]
+; CHECK-NEXT: [[TMP38:%.*]] = sub <2 x i32> [[TMP15]], [[TMP27]]
; CHECK-NEXT: [[ADD44_2:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0
; CHECK-NEXT: [[CONV:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1
-; CHECK-NEXT: [[ADD44_3:%.*]] = add i32 [[CONV]], [[ADD44_2]]
+; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[CONV]], [[ADD44_2]]
; CHECK-NEXT: [[SUB51_2:%.*]] = sub i32 [[ADD44_2]], [[CONV]]
; CHECK-NEXT: [[SUB45_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0
; CHECK-NEXT: [[SUB47_2:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1
-; CHECK-NEXT: [[ADD46_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
+; CHECK-NEXT: [[ADD55_2:%.*]] = add i32 [[SUB47_2]], [[SUB45_2]]
; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[SUB45_2]], [[SUB47_2]]
-; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_3]]
-; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
-; CHECK-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
-; CHECK-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr i8, ptr null, i64 4
+; CHECK-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr i8, ptr null, i64 4
+; CHECK-NEXT: [[ARRAYIDX8_3:%.*]] = getelementptr i8, ptr null, i64 1
+; CHECK-NEXT: [[ARRAYIDX10_3:%.*]] = getelementptr i8, ptr null, i64 1
; CHECK-NEXT: [[TMP44:%.*]] = load i8, ptr null, align 1
-; CHECK-NEXT: [[TMP45:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX20_3]], i32 0
-; CHECK-NEXT: [[TMP46:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP45]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP47:%.*]] = zext <2 x i8> [[TMP46]] to <2 x i32>
-; CHECK-NEXT: [[TMP48:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX22_3]], i32 0
-; CHECK-NEXT: [[TMP49:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP48]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP50:%.*]] = zext <2 x i8> [[TMP49]] to <2 x i32>
-; CHECK-NEXT: [[TMP51:%.*]] = sub <2 x i32> [[TMP47]], [[TMP50]]
-; CHECK-NEXT: [[TMP52:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
-; CHECK-NEXT: [[TMP53:%.*]] = zext <2 x i8> [[TMP52]] to <2 x i32>
-; CHECK-NEXT: [[TMP54:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 6, i64 4>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP55:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
-; CHECK-NEXT: [[TMP56:%.*]] = sub <2 x i32> [[TMP53]], [[TMP55]]
-; CHECK-NEXT: [[TMP57:%.*]] = shl <2 x i32> [[TMP56]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP58:%.*]] = add <2 x i32> [[TMP57]], [[TMP51]]
-; CHECK-NEXT: [[TMP59:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP60:%.*]] = zext <2 x i8> [[TMP59]] to <2 x i32>
-; CHECK-NEXT: [[TMP61:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP62:%.*]] = zext <2 x i8> [[TMP61]] to <2 x i32>
-; CHECK-NEXT: [[TMP63:%.*]] = sub <2 x i32> [[TMP60]], [[TMP62]]
+; CHECK-NEXT: [[ARRAYIDX15_3:%.*]] = getelementptr i8, ptr null, i64 5
+; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
+; CHECK-NEXT: [[TMP53:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP33:%.*]] = zext <2 x i8> [[TMP53]] to <2 x i32>
+; CHECK-NEXT: [[TMP54:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP39:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
+; CHECK-NEXT: [[TMP40:%.*]] = sub <2 x i32> [[TMP33]], [[TMP39]]
+; CHECK-NEXT: [[TMP41:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_3]], i64 -4, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP42:%.*]] = zext <2 x i8> [[TMP41]] to <2 x i32>
+; CHECK-NEXT: [[TMP58:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP59:%.*]] = zext <2 x i8> [[TMP58]] to <2 x i32>
+; CHECK-NEXT: [[TMP45:%.*]] = sub <2 x i32> [[TMP42]], [[TMP59]]
+; CHECK-NEXT: [[TMP46:%.*]] = shl <2 x i32> [[TMP45]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP62:%.*]] = add <2 x i32> [[TMP46]], [[TMP40]]
+; CHECK-NEXT: [[TMP48:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX8_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP49:%.*]] = zext <2 x i8> [[TMP48]] to <2 x i32>
+; CHECK-NEXT: [[TMP50:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX10_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP51:%.*]] = zext <2 x i8> [[TMP50]] to <2 x i32>
+; CHECK-NEXT: [[TMP52:%.*]] = sub <2 x i32> [[TMP49]], [[TMP51]]
; CHECK-NEXT: [[TMP64:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
; CHECK-NEXT: [[TMP65:%.*]] = insertelement <2 x i8> [[TMP64]], i8 [[TMP43]], i32 1
-; CHECK-NEXT: [[TMP66:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
-; CHECK-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 7, i64 5>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP68:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
-; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP66]], [[TMP68]]
+; CHECK-NEXT: [[TMP55:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
+; CHECK-NEXT: [[TMP56:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX15_3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP57:%.*]] = zext <2 x i8> [[TMP56]] to <2 x i32>
+; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP55]], [[TMP57]]
; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP71:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]]
-; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]]
-; CHECK-NEXT: [[TMP190:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]]
+; CHECK-NEXT: [[TMP60:%.*]] = add <2 x i32> [[TMP70]], [[TMP52]]
+; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP60]], [[TMP62]]
+; CHECK-NEXT: [[TMP47:%.*]] = sub <2 x i32> [[TMP62]], [[TMP60]]
; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
-; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP74]], [[TMP75]]
+; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP75]], [[TMP74]]
+; CHECK-NEXT: [[SUB51_3:%.*]] = sub i32 [[TMP74]], [[TMP75]]
+; CHECK-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP47]], i32 0
+; CHECK-NEXT: [[TMP79:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
+; CHECK-NEXT: [[ADD55_3:%.*]] = add i32 [[TMP79]], [[TMP61]]
+; CHECK-NEXT: [[SUB59_3:%.*]] = sub i32 [[TMP61]], [[TMP79]]
; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
-; CHECK-NEXT: [[TMP79:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
-; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP79]], 15
-; CHECK-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
-; CHECK-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
-; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[ADD46_2]], 15
+; CHECK-NEXT: [[TMP63:%.*]] = extractelement <2 x i32> [[TMP33]], i32 0
+; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[TMP63]], 15
; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
+; CHECK-NEXT: [[SHR_I_1:%.*]] = lshr i32 [[CONV]], 15
+; CHECK-NEXT: [[AND_I_1:%.*]] = and i32 [[SHR_I_1]], 65537
+; CHECK-NEXT: [[MUL_I_1:%.*]] = mul i32 [[AND_I_1]], 65535
+; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
+; CHECK-NEXT: [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
; CHECK-NEXT: [[SHR_I49_5:%.*]] = lshr i32 [[TMP107]], 15
; CHECK-NEXT: [[AND_I50_5:%.*]] = and i32 [[SHR_I49_5]], 65537
; CHECK-NEXT: [[MUL_I51_5:%.*]] = mul i32 [[AND_I50_5]], 65535
+; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
+; CHECK-NEXT: [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
; CHECK-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV_1]], 15
; CHECK-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
; CHECK-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
+; CHECK-NEXT: [[ADD94_5:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
+; CHECK-NEXT: [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
; CHECK-NEXT: [[SHR_I49_6:%.*]] = lshr i32 [[CONV1]], 15
; CHECK-NEXT: [[AND_I50_6:%.*]] = and i32 [[SHR_I49_6]], 65537
; CHECK-NEXT: [[MUL_I51_6:%.*]] = mul i32 [[AND_I50_6]], 65535
-; CHECK-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
-; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
-; CHECK-NEXT: [[TMP80:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[ARRAYIDX22]], i32 1
-; CHECK-NEXT: [[TMP81:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP80]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
-; CHECK-NEXT: [[TMP83:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP3]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP83]] to <2 x i32>
-; CHECK-NEXT: [[TMP85:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP6]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP86:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
-; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP84]], [[TMP86]]
+; CHECK-NEXT: [[TMP66:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
+; CHECK-NEXT: [[TMP102:%.*]] = zext <2 x i8> [[TMP66]] to <2 x i32>
+; CHECK-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP78:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
+; CHECK-NEXT: [[TMP73:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[TMP1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP81:%.*]] = zext <2 x i8> [[TMP73]] to <2 x i32>
+; CHECK-NEXT: [[TMP71:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP76:%.*]] = zext <2 x i8> [[TMP71]] to <2 x i32>
+; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP81]], [[TMP76]]
; CHECK-NEXT: [[TMP88:%.*]] = shl <2 x i32> [[TMP87]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP89:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP7]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP90:%.*]] = zext <2 x i8> [[TMP89]] to <2 x i32>
-; CHECK-NEXT: [[TMP91:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP8]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP92:%.*]] = zext <2 x i8> [[TMP91]] to <2 x i32>
-; CHECK-NEXT: [[TMP93:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP9]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
-; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP92]], [[TMP94]]
+; CHECK-NEXT: [[TMP83:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP80:%.*]] = zext <2 x i8> [[TMP83]] to <2 x i32>
+; CHECK-NEXT: [[TMP77:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP77]] to <2 x i32>
+; CHECK-NEXT: [[TMP85:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
+; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP82]], [[TMP84]]
; CHECK-NEXT: [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV33]], i32 1
-; CHECK-NEXT: [[TMP98:%.*]] = sub <2 x i32> [[TMP97]], [[TMP90]]
-; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]]
-; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
-; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]]
-; CHECK-NEXT: [[TMP200:%.*]] = add <2 x i32> [[TMP88]], [[TMP101]]
-; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> [[TMP200]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP104]], [[TMP200]]
-; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP200]], [[TMP104]]
+; CHECK-NEXT: [[TMP89:%.*]] = sub <2 x i32> [[TMP97]], [[TMP80]]
+; CHECK-NEXT: [[TMP105:%.*]] = add <2 x i32> [[TMP96]], [[TMP89]]
+; CHECK-NEXT: [[TMP86:%.*]] = insertelement <2 x i32> [[TMP102]], i32 [[CONV1]], i32 0
+; CHECK-NEXT: [[TMP99:%.*]] = sub <2 x i32> [[TMP86]], [[TMP78]]
+; CHECK-NEXT: [[TMP92:%.*]] = add <2 x i32> [[TMP88]], [[TMP99]]
+; CHECK-NEXT: [[TMP93:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> [[TMP92]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP105]], [[TMP92]]
+; CHECK-NEXT: [[TMP91:%.*]] = sub <2 x i32> [[TMP92]], [[TMP105]]
; CHECK-NEXT: [[TMP238:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0
; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1
; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP108]], [[TMP238]]
-; CHECK-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1
+; CHECK-NEXT: [[SUB51:%.*]] = sub i32 [[TMP238]], [[TMP108]]
+; CHECK-NEXT: [[TMP94:%.*]] = extractelement <2 x i32> [[TMP91]], i32 0
+; CHECK-NEXT: [[SUB47:%.*]] = extractelement <2 x i32> [[TMP91]], i32 1
+; CHECK-NEXT: [[ADD55:%.*]] = add i32 [[SUB47]], [[TMP94]]
+; CHECK-NEXT: [[SUB59:%.*]] = sub i32 [[TMP94]], [[SUB47]]
; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP108]], 15
; CHECK-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; CHECK-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
-; CHECK-NEXT: [[SHR_I59_4:%.*]] = lshr i32 [[TMP142]], 15
+; CHECK-NEXT: [[SHR_I59_4:%.*]] = lshr i32 [[SUB47]], 15
; CHECK-NEXT: [[AND_I60_4:%.*]] = and i32 [[SHR_I59_4]], 65537
; CHECK-NEXT: [[MUL_I61_4:%.*]] = mul i32 [[AND_I60_4]], 65535
-; CHECK-NEXT: [[TMP109:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
-; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32>
-; CHECK-NEXT: [[TMP111:%.*]] = insertelement <2 x i8> poison, i8 [[TMP12]], i32 0
-; CHECK-NEXT: [[TMP112:%.*]] = insertelement <2 x i8> [[TMP111]], i8 [[TMP13]], i32 1
-; CHECK-NEXT: [[TMP113:%.*]] = zext <2 x i8> [[TMP112]] to <2 x i32>
-; CHECK-NEXT: [[TMP114:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR3]], i32 0
-; CHECK-NEXT: [[TMP115:%.*]] = shufflevector <2 x ptr> [[TMP114]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP116:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP117:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP116]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP118:%.*]] = zext <2 x i8> [[TMP117]] to <2 x i32>
-; CHECK-NEXT: [[TMP119:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR644]], i32 0
-; CHECK-NEXT: [[TMP120:%.*]] = shufflevector <2 x ptr> [[TMP119]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP121:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP122:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP121]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP123:%.*]] = zext <2 x i8> [[TMP122]] to <2 x i32>
-; CHECK-NEXT: [[TMP124:%.*]] = sub <2 x i32> [[TMP118]], [[TMP123]]
+; CHECK-NEXT: [[TMP104:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
+; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP104]] to <2 x i32>
+; CHECK-NEXT: [[TMP98:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR644]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP103:%.*]] = zext <2 x i8> [[TMP98]] to <2 x i32>
+; CHECK-NEXT: [[TMP100:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP109:%.*]] = zext <2 x i8> [[TMP100]] to <2 x i32>
+; CHECK-NEXT: [[TMP112:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP114:%.*]] = zext <2 x i8> [[TMP112]] to <2 x i32>
+; CHECK-NEXT: [[TMP124:%.*]] = sub <2 x i32> [[TMP109]], [[TMP114]]
; CHECK-NEXT: [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP126:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP127:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP126]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP144:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
-; CHECK-NEXT: [[TMP129:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP130:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP129]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP131:%.*]] = zext <2 x i8> [[TMP130]] to <2 x i32>
-; CHECK-NEXT: [[TMP132:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP133:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP132]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP134:%.*]] = zext <2 x i8> [[TMP133]] to <2 x i32>
-; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP131]], [[TMP134]]
+; CHECK-NEXT: [[TMP113:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP111:%.*]] = zext <2 x i8> [[TMP113]] to <2 x i32>
+; CHECK-NEXT: [[TMP115:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP118:%.*]] = zext <2 x i8> [[TMP115]] to <2 x i32>
+; CHECK-NEXT: [[TMP116:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP128:%.*]] = zext <2 x i8> [[TMP116]] to <2 x i32>
+; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP118]], [[TMP128]]
; CHECK-NEXT: [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], <i32 16, i32 16>
; CHECK-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1
-; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP144]]
-; CHECK-NEXT: [[TMP139:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
-; CHECK-NEXT: [[TMP140:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
-; CHECK-NEXT: [[TMP141:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]]
-; CHECK-NEXT: [[TMP155:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]]
-; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP139]], [[TMP155]]
-; CHECK-NEXT: [[TMP189:%.*]] = sub <2 x i32> [[TMP155]], [[TMP139]]
+; CHECK-NEXT: [[TMP119:%.*]] = sub <2 x i32> [[TMP137]], [[TMP111]]
+; CHECK-NEXT: [[TMP120:%.*]] = add <2 x i32> [[TMP136]], [[TMP119]]
+; CHECK-NEXT: [[TMP117:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
+; CHECK-NEXT: [[TMP122:%.*]] = sub <2 x i32> [[TMP117]], [[TMP103]]
+; CHECK-NEXT: [[TMP123:%.*]] = add <2 x i32> [[TMP125]], [[TMP122]]
+; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP120]], [[TMP123]]
+; CHECK-NEXT: [[TMP121:%.*]] = sub <2 x i32> [[TMP123]], [[TMP120]]
; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0
; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1
; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP145]]
-; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP146]], 15
-; CHECK-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
-; CHECK-NEXT: [[MUL_I56:%.*]] = mul i32 [[AND_I55]], 65535
+; CHECK-NEXT: [[SUB51_1:%.*]] = sub i32 [[TMP145]], [[TMP146]]
+; CHECK-NEXT: [[TMP126:%.*]] = extractelement <2 x i32> [[TMP121]], i32 0
+; CHECK-NEXT: [[TMP127:%.*]] = extractelement <2 x i32> [[TMP121]], i32 1
+; CHECK-NEXT: [[ADD55_1:%.*]] = add i32 [[TMP127]], [[TMP126]]
+; CHECK-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP126]], [[TMP127]]
+; CHECK-NEXT: [[SHR_I54_1:%.*]] = lshr i32 [[TMP146]], 15
+; CHECK-NEXT: [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
+; CHECK-NEXT: [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
; CHECK-NEXT: [[TMP147:%.*]] = lshr <2 x i32> [[TMP110]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP148:%.*]] = and <2 x i32> [[TMP147]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP149:%.*]] = mul <2 x i32> [[TMP148]], <i32 65535, i32 65535>
@@ -218,76 +226,46 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
-; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_2]], [[ADD103]]
-; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP79]]
-; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51_3]], [[ADD105]]
-; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
-; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
+; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I51_3]], [[ADD103]]
+; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP63]]
+; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I_1]], [[ADD105]]
+; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[CONV]]
+; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56_1]], [[SUB104]]
; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]]
; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61_1]], [[SUB106]]
; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP108]]
; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
-; CHECK-NEXT: [[TMP150:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB59_2]], i32 1
-; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB51_2]], i32 1
-; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]]
-; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP184:%.*]] = shufflevector <2 x i32> [[TMP189]], <2 x i32> [[TMP190]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP184]]
-; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1
-; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
-; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP158]], [[TMP157]]
-; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0
-; CHECK-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
-; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP161]], [[TMP160]]
-; CHECK-NEXT: [[TMP163:%.*]] = sub <2 x i32> [[TMP153]], [[TMP156]]
-; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
-; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
-; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP165]], [[TMP164]]
-; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP164]], [[TMP165]]
+; CHECK-NEXT: [[ADD78_1:%.*]] = add i32 [[ADD55_1]], [[ADD55]]
+; CHECK-NEXT: [[SUB86_1:%.*]] = sub i32 [[ADD55]], [[ADD55_1]]
+; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
+; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_5]], [[ADD105_1]]
; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP107]]
-; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP189]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[TMP167:%.*]] = lshr <2 x i32> [[TMP166]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP168:%.*]] = and <2 x i32> [[TMP167]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP169:%.*]] = mul <2 x i32> [[TMP168]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP172:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
-; CHECK-NEXT: [[TMP171:%.*]] = shufflevector <2 x i32> [[TMP172]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP208:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
-; CHECK-NEXT: [[TMP209:%.*]] = shufflevector <2 x i32> [[TMP208]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP282:%.*]] = add <2 x i32> [[TMP171]], [[TMP209]]
-; CHECK-NEXT: [[TMP211:%.*]] = sub <2 x i32> [[TMP171]], [[TMP209]]
-; CHECK-NEXT: [[TMP283:%.*]] = shufflevector <2 x i32> [[TMP282]], <2 x i32> [[TMP211]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP283]]
-; CHECK-NEXT: [[TMP178:%.*]] = xor <2 x i32> [[TMP177]], [[TMP166]]
+; CHECK-NEXT: [[TMP129:%.*]] = shufflevector <2 x i32> [[TMP17]], <2 x i32> [[TMP121]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP130:%.*]] = lshr <2 x i32> [[TMP129]], <i32 15, i32 15>
+; CHECK-NEXT: [[TMP131:%.*]] = and <2 x i32> [[TMP130]], <i32 65537, i32 65537>
+; CHECK-NEXT: [[TMP132:%.*]] = mul <2 x i32> [[TMP131]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP133:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_1]], i32 0
+; CHECK-NEXT: [[TMP144:%.*]] = shufflevector <2 x i32> [[TMP133]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
+; CHECK-NEXT: [[TMP152:%.*]] = shufflevector <2 x i32> [[TMP151]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP144]], [[TMP152]]
+; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP144]], [[TMP152]]
+; CHECK-NEXT: [[TMP139:%.*]] = shufflevector <2 x i32> [[TMP153]], <2 x i32> [[TMP138]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP140:%.*]] = add <2 x i32> [[TMP132]], [[TMP139]]
+; CHECK-NEXT: [[TMP141:%.*]] = xor <2 x i32> [[TMP140]], [[TMP129]]
; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_4]], [[SUB106_1]]
-; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP142]]
+; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[SUB47]]
; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
-; CHECK-NEXT: [[TMP179:%.*]] = extractelement <2 x i32> [[TMP178]], i32 0
-; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP179]]
-; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1
-; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP180]]
+; CHECK-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP141]], i32 0
+; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP142]]
+; CHECK-NEXT: [[TMP154:%.*]] = extractelement <2 x i32> [[TMP141]], i32 1
+; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP154]]
; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
-; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP106]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
-; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_3]], i32 0
-; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP106]], i32 [[ADD46_2]], i32 0
-; CHECK-NEXT: [[TMP195:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]]
-; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP187:%.*]] = sub <2 x i32> [[TMP185]], [[TMP186]]
-; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP195]], i32 0
-; CHECK-NEXT: [[TMP196:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0
-; CHECK-NEXT: [[TMP199:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_4:%.*]] = add i32 [[TMP196]], [[TMP188]]
-; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP195]], i32 1
-; CHECK-NEXT: [[TMP192:%.*]] = extractelement <2 x i32> [[TMP187]], i32 1
-; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP195]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP192]], [[TMP191]]
-; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP195]], [[TMP187]]
+; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[SUB51_1]], [[SUB51]]
+; CHECK-NEXT: [[SUB86_2:%.*]] = sub i32 [[SUB51]], [[SUB51_1]]
; CHECK-NEXT: [[TMP244:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
; CHECK-NEXT: [[TMP245:%.*]] = shufflevector <2 x i32> [[TMP244]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_4]], i32 0
@@ -295,40 +273,25 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP216:%.*]] = add <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP245]], [[TMP198]]
; CHECK-NEXT: [[TMP221:%.*]] = shufflevector <2 x i32> [[TMP216]], <2 x i32> [[TMP210]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0
-; CHECK-NEXT: [[TMP203:%.*]] = extractelement <2 x i32> [[TMP194]], i32 1
-; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP215]], [[TMP203]]
-; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP215]]
+; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[SUB102_2]], [[SUB86_2]]
+; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[SUB86_2]], [[SUB102_2]]
; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_2]]
; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
-; CHECK-NEXT: [[TMP266:%.*]] = add <2 x i32> [[TMP149]], [[TMP221]]
-; CHECK-NEXT: [[TMP267:%.*]] = xor <2 x i32> [[TMP266]], [[TMP110]]
+; CHECK-NEXT: [[TMP134:%.*]] = add <2 x i32> [[TMP149]], [[TMP221]]
+; CHECK-NEXT: [[TMP213:%.*]] = xor <2 x i32> [[TMP134]], [[TMP110]]
; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP238]], 15
; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; CHECK-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP238]]
; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
-; CHECK-NEXT: [[TMP206:%.*]] = extractelement <2 x i32> [[TMP267]], i32 0
-; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP206]]
-; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP267]], i32 1
-; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP207]]
-; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
-; CHECK-NEXT: [[TMP222:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB51_2]], i32 0
-; CHECK-NEXT: [[TMP225:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB59_2]], i32 0
-; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP222]], [[TMP225]]
-; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP190]], <2 x i32> [[TMP189]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP227]], [[TMP212]]
-; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0
; CHECK-NEXT: [[TMP237:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
-; CHECK-NEXT: [[TMP239:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ADD94_5:%.*]] = add i32 [[TMP237]], [[TMP214]]
-; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1
+; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP237]]
; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
-; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP226]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP218]], [[TMP217]]
-; CHECK-NEXT: [[TMP240:%.*]] = sub <2 x i32> [[TMP226]], [[TMP213]]
+; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP218]]
+; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
+; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[SUB59_1]], [[SUB59]]
+; CHECK-NEXT: [[SUB86_3:%.*]] = sub i32 [[SUB59]], [[SUB59_1]]
; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP241:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_5]], i32 0
@@ -336,17 +299,15 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; CHECK-NEXT: [[TMP261:%.*]] = add <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT: [[TMP262:%.*]] = sub <2 x i32> [[TMP224]], [[TMP242]]
; CHECK-NEXT: [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP261]], <2 x i32> [[TMP262]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP240]], i32 0
-; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP240]], i32 1
-; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[TMP228]], [[TMP229]]
-; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[TMP229]], [[TMP228]]
+; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
+; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_6]], [[ADD105_3]]
; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV1]]
; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP102]], <i32 15, i32 15>
; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]]
-; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP233]], [[TMP102]]
+; CHECK-NEXT: [[TMP150:%.*]] = add <2 x i32> [[TMP232]], [[TMP220]]
+; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP150]], [[TMP102]]
; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
@@ -368,9 +329,9 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 4
; THR15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr i8, ptr [[PIX2]], i64 4
; THR15-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
-; THR15-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 2
-; THR15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 6
-; THR15-NEXT: [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 6
+; THR15-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 1
+; THR15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr i8, ptr [[PIX1]], i64 5
+; THR15-NEXT: [[ARRAYIDX27:%.*]] = getelementptr i8, ptr [[PIX2]], i64 5
; THR15-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
; THR15-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
; THR15-NEXT: [[CONV33:%.*]] = zext i8 [[TMP1]] to i32
@@ -381,9 +342,9 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 4
; THR15-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 4
; THR15-NEXT: [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
-; THR15-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
-; THR15-NEXT: [[ARRAYIDX25_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 6
-; THR15-NEXT: [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 6
+; THR15-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 1
+; THR15-NEXT: [[ARRAYIDX13_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 5
+; THR15-NEXT: [[ARRAYIDX27_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 5
; THR15-NEXT: [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
; THR15-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
; THR15-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP3]] to i32
@@ -392,10 +353,10 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
; THR15-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
; THR15-NEXT: [[TMP4:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
-; THR15-NEXT: [[TMP5:%.*]] = zext <2 x i8> [[TMP4]] to <2 x i32>
+; THR15-NEXT: [[TMP66:%.*]] = zext <2 x i8> [[TMP4]] to <2 x i32>
; THR15-NEXT: [[TMP6:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
; THR15-NEXT: [[TMP7:%.*]] = zext <2 x i8> [[TMP6]] to <2 x i32>
-; THR15-NEXT: [[TMP8:%.*]] = sub <2 x i32> [[TMP5]], [[TMP7]]
+; THR15-NEXT: [[TMP8:%.*]] = sub <2 x i32> [[TMP66]], [[TMP7]]
; THR15-NEXT: [[TMP9:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
; THR15-NEXT: [[TMP10:%.*]] = zext <2 x i8> [[TMP9]] to <2 x i32>
; THR15-NEXT: [[TMP11:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
@@ -484,103 +445,101 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[SHR_I49:%.*]] = lshr i32 [[ADD46_2]], 15
; THR15-NEXT: [[AND_I50:%.*]] = and i32 [[SHR_I49]], 65537
; THR15-NEXT: [[MUL_I51:%.*]] = mul i32 [[AND_I50]], 65535
-; THR15-NEXT: [[ADD94_2:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
-; THR15-NEXT: [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
-; THR15-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[CONV_1]], 15
+; THR15-NEXT: [[ADD55_1:%.*]] = add i32 [[ADD55_3]], [[ADD55_2]]
+; THR15-NEXT: [[SUB102_1:%.*]] = sub i32 [[ADD55_2]], [[ADD55_3]]
+; THR15-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[TMP66]], i32 0
+; THR15-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[TMP64]], 15
; THR15-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
; THR15-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
-; THR15-NEXT: [[ADD94_3:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
-; THR15-NEXT: [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
-; THR15-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV]], 15
+; THR15-NEXT: [[ADD94_2:%.*]] = add i32 [[SUB51_3]], [[SUB51_2]]
+; THR15-NEXT: [[SUB102_2:%.*]] = sub i32 [[SUB51_2]], [[SUB51_3]]
+; THR15-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV_1]], 15
; THR15-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
; THR15-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
-; THR15-NEXT: [[TMP64:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
-; THR15-NEXT: [[TMP65:%.*]] = zext <2 x i8> [[TMP64]] to <2 x i32>
-; THR15-NEXT: [[TMP66:%.*]] = load <2 x i8>, ptr [[PIX2]], align 1
-; THR15-NEXT: [[TMP67:%.*]] = zext <2 x i8> [[TMP66]] to <2 x i32>
-; THR15-NEXT: [[TMP68:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3]], align 1
-; THR15-NEXT: [[TMP69:%.*]] = zext <2 x i8> [[TMP68]] to <2 x i32>
-; THR15-NEXT: [[TMP70:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5]], align 1
-; THR15-NEXT: [[TMP71:%.*]] = zext <2 x i8> [[TMP70]] to <2 x i32>
-; THR15-NEXT: [[TMP72:%.*]] = sub <2 x i32> [[TMP69]], [[TMP71]]
+; THR15-NEXT: [[ADD94_3:%.*]] = add i32 [[SUB59_3]], [[SUB59_2]]
+; THR15-NEXT: [[SUB102_3:%.*]] = sub i32 [[SUB59_2]], [[SUB59_3]]
+; THR15-NEXT: [[SHR_I49_4:%.*]] = lshr i32 [[CONV]], 15
+; THR15-NEXT: [[AND_I50_4:%.*]] = and i32 [[SHR_I49_4]], 65537
+; THR15-NEXT: [[MUL_I51_4:%.*]] = mul i32 [[AND_I50_4]], 65535
+; THR15-NEXT: [[TMP65:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
+; THR15-NEXT: [[TMP74:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
+; THR15-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[PIX2]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP68:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
+; THR15-NEXT: [[TMP69:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP70:%.*]] = zext <2 x i8> [[TMP69]] to <2 x i32>
+; THR15-NEXT: [[TMP71:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP81:%.*]] = zext <2 x i8> [[TMP71]] to <2 x i32>
+; THR15-NEXT: [[TMP72:%.*]] = sub <2 x i32> [[TMP70]], [[TMP81]]
; THR15-NEXT: [[TMP73:%.*]] = shl <2 x i32> [[TMP72]], <i32 16, i32 16>
-; THR15-NEXT: [[TMP74:%.*]] = shufflevector <2 x i32> [[TMP65]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
-; THR15-NEXT: [[TMP75:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV]], i32 0
-; THR15-NEXT: [[TMP76:%.*]] = sub <2 x i32> [[TMP75]], [[TMP67]]
-; THR15-NEXT: [[TMP77:%.*]] = add <2 x i32> [[TMP73]], [[TMP76]]
-; THR15-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22]], align 1
-; THR15-NEXT: [[TMP79:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
-; THR15-NEXT: [[TMP80:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25]], align 1
-; THR15-NEXT: [[TMP81:%.*]] = zext <2 x i8> [[TMP80]] to <2 x i32>
-; THR15-NEXT: [[TMP82:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27]], align 1
-; THR15-NEXT: [[TMP83:%.*]] = zext <2 x i8> [[TMP82]] to <2 x i32>
-; THR15-NEXT: [[TMP84:%.*]] = sub <2 x i32> [[TMP81]], [[TMP83]]
+; THR15-NEXT: [[TMP75:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP76:%.*]] = zext <2 x i8> [[TMP75]] to <2 x i32>
+; THR15-NEXT: [[TMP82:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX25]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP78:%.*]] = zext <2 x i8> [[TMP82]] to <2 x i32>
+; THR15-NEXT: [[TMP79:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP80:%.*]] = zext <2 x i8> [[TMP79]] to <2 x i32>
+; THR15-NEXT: [[TMP84:%.*]] = sub <2 x i32> [[TMP78]], [[TMP80]]
; THR15-NEXT: [[TMP85:%.*]] = shl <2 x i32> [[TMP84]], <i32 16, i32 16>
; THR15-NEXT: [[TMP86:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV33]], i32 1
-; THR15-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP86]], [[TMP79]]
+; THR15-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP86]], [[TMP76]]
; THR15-NEXT: [[TMP88:%.*]] = add <2 x i32> [[TMP85]], [[TMP87]]
+; THR15-NEXT: [[TMP92:%.*]] = insertelement <2 x i32> [[TMP74]], i32 [[CONV]], i32 0
+; THR15-NEXT: [[TMP93:%.*]] = sub <2 x i32> [[TMP92]], [[TMP68]]
+; THR15-NEXT: [[TMP95:%.*]] = add <2 x i32> [[TMP73]], [[TMP93]]
+; THR15-NEXT: [[TMP97:%.*]] = shufflevector <2 x i32> [[TMP88]], <2 x i32> [[TMP95]], <2 x i32> <i32 0, i32 2>
+; THR15-NEXT: [[TMP77:%.*]] = add <2 x i32> [[TMP88]], [[TMP95]]
+; THR15-NEXT: [[TMP91:%.*]] = sub <2 x i32> [[TMP95]], [[TMP88]]
; THR15-NEXT: [[TMP89:%.*]] = extractelement <2 x i32> [[TMP77]], i32 0
; THR15-NEXT: [[TMP90:%.*]] = extractelement <2 x i32> [[TMP77]], i32 1
-; THR15-NEXT: [[ADD44:%.*]] = add i32 [[TMP90]], [[TMP89]]
-; THR15-NEXT: [[SUB45:%.*]] = sub i32 [[TMP89]], [[TMP90]]
-; THR15-NEXT: [[TMP91:%.*]] = extractelement <2 x i32> [[TMP88]], i32 0
-; THR15-NEXT: [[TMP92:%.*]] = extractelement <2 x i32> [[TMP88]], i32 1
-; THR15-NEXT: [[ADD46:%.*]] = add i32 [[TMP92]], [[TMP91]]
-; THR15-NEXT: [[SUB47:%.*]] = sub i32 [[TMP91]], [[TMP92]]
-; THR15-NEXT: [[ADD48:%.*]] = add i32 [[ADD46]], [[ADD44]]
-; THR15-NEXT: [[SUB51:%.*]] = sub i32 [[ADD44]], [[ADD46]]
-; THR15-NEXT: [[ADD55:%.*]] = add i32 [[SUB47]], [[SUB45]]
-; THR15-NEXT: [[SUB59:%.*]] = sub i32 [[SUB45]], [[SUB47]]
-; THR15-NEXT: [[SHR_I59:%.*]] = lshr i32 [[ADD46]], 15
+; THR15-NEXT: [[ADD48:%.*]] = add i32 [[TMP90]], [[TMP89]]
+; THR15-NEXT: [[SUB51:%.*]] = sub i32 [[TMP89]], [[TMP90]]
+; THR15-NEXT: [[TMP94:%.*]] = extractelement <2 x i32> [[TMP91]], i32 0
+; THR15-NEXT: [[SUB47:%.*]] = extractelement <2 x i32> [[TMP91]], i32 1
+; THR15-NEXT: [[ADD56:%.*]] = add i32 [[SUB47]], [[TMP94]]
+; THR15-NEXT: [[SUB59:%.*]] = sub i32 [[TMP94]], [[SUB47]]
+; THR15-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP90]], 15
; THR15-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
; THR15-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
; THR15-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[SUB47]], 15
; THR15-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
; THR15-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
-; THR15-NEXT: [[TMP93:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
-; THR15-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
-; THR15-NEXT: [[TMP95:%.*]] = load <2 x i8>, ptr [[ADD_PTR644]], align 1
-; THR15-NEXT: [[TMP96:%.*]] = zext <2 x i8> [[TMP95]] to <2 x i32>
-; THR15-NEXT: [[TMP97:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_1]], align 1
-; THR15-NEXT: [[TMP98:%.*]] = zext <2 x i8> [[TMP97]] to <2 x i32>
-; THR15-NEXT: [[TMP99:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_1]], align 1
-; THR15-NEXT: [[TMP100:%.*]] = zext <2 x i8> [[TMP99]] to <2 x i32>
-; THR15-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP98]], [[TMP100]]
+; THR15-NEXT: [[TMP96:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
+; THR15-NEXT: [[TMP103:%.*]] = zext <2 x i8> [[TMP96]] to <2 x i32>
+; THR15-NEXT: [[TMP98:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ADD_PTR644]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP99:%.*]] = zext <2 x i8> [[TMP98]] to <2 x i32>
+; THR15-NEXT: [[TMP100:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX3_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP104:%.*]] = zext <2 x i8> [[TMP100]] to <2 x i32>
+; THR15-NEXT: [[TMP105:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX5_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP112:%.*]] = zext <2 x i8> [[TMP105]] to <2 x i32>
+; THR15-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP104]], [[TMP112]]
; THR15-NEXT: [[TMP102:%.*]] = shl <2 x i32> [[TMP101]], <i32 16, i32 16>
-; THR15-NEXT: [[TMP103:%.*]] = shufflevector <2 x i32> [[TMP94]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
-; THR15-NEXT: [[TMP104:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV_1]], i32 0
-; THR15-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP104]], [[TMP96]]
-; THR15-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP102]], [[TMP105]]
-; THR15-NEXT: [[TMP107:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_1]], align 1
-; THR15-NEXT: [[TMP108:%.*]] = zext <2 x i8> [[TMP107]] to <2 x i32>
-; THR15-NEXT: [[TMP109:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25_1]], align 1
-; THR15-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32>
-; THR15-NEXT: [[TMP111:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_1]], align 1
-; THR15-NEXT: [[TMP112:%.*]] = zext <2 x i8> [[TMP111]] to <2 x i32>
-; THR15-NEXT: [[TMP113:%.*]] = sub <2 x i32> [[TMP110]], [[TMP112]]
+; THR15-NEXT: [[TMP120:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX22_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP107:%.*]] = zext <2 x i8> [[TMP120]] to <2 x i32>
+; THR15-NEXT: [[TMP108:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX13_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP109:%.*]] = zext <2 x i8> [[TMP108]] to <2 x i32>
+; THR15-NEXT: [[TMP110:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 [[ARRAYIDX27_1]], i64 2, <2 x i1> <i1 true, i1 true>, i32 2)
+; THR15-NEXT: [[TMP111:%.*]] = zext <2 x i8> [[TMP110]] to <2 x i32>
+; THR15-NEXT: [[TMP113:%.*]] = sub <2 x i32> [[TMP109]], [[TMP111]]
; THR15-NEXT: [[TMP114:%.*]] = shl <2 x i32> [[TMP113]], <i32 16, i32 16>
; THR15-NEXT: [[TMP115:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV33_1]], i32 1
-; THR15-NEXT: [[TMP116:%.*]] = sub <2 x i32> [[TMP115]], [[TMP108]]
+; THR15-NEXT: [[TMP116:%.*]] = sub <2 x i32> [[TMP115]], [[TMP107]]
; THR15-NEXT: [[TMP117:%.*]] = add <2 x i32> [[TMP114]], [[TMP116]]
+; THR15-NEXT: [[TMP126:%.*]] = insertelement <2 x i32> [[TMP103]], i32 [[CONV_1]], i32 0
+; THR15-NEXT: [[TMP127:%.*]] = sub <2 x i32> [[TMP126]], [[TMP99]]
+; THR15-NEXT: [[TMP128:%.*]] = add <2 x i32> [[TMP102]], [[TMP127]]
+; THR15-NEXT: [[TMP106:%.*]] = add <2 x i32> [[TMP117]], [[TMP128]]
+; THR15-NEXT: [[TMP121:%.*]] = sub <2 x i32> [[TMP128]], [[TMP117]]
; THR15-NEXT: [[TMP118:%.*]] = extractelement <2 x i32> [[TMP106]], i32 0
; THR15-NEXT: [[TMP119:%.*]] = extractelement <2 x i32> [[TMP106]], i32 1
-; THR15-NEXT: [[ADD44_1:%.*]] = add i32 [[TMP119]], [[TMP118]]
-; THR15-NEXT: [[SUB45_1:%.*]] = sub i32 [[TMP118]], [[TMP119]]
-; THR15-NEXT: [[TMP120:%.*]] = extractelement <2 x i32> [[TMP117]], i32 0
-; THR15-NEXT: [[TMP121:%.*]] = extractelement <2 x i32> [[TMP117]], i32 1
-; THR15-NEXT: [[ADD46_1:%.*]] = add i32 [[TMP121]], [[TMP120]]
-; THR15-NEXT: [[SUB47_1:%.*]] = sub i32 [[TMP120]], [[TMP121]]
-; THR15-NEXT: [[ADD48_1:%.*]] = add i32 [[ADD46_1]], [[ADD44_1]]
-; THR15-NEXT: [[SUB51_1:%.*]] = sub i32 [[ADD44_1]], [[ADD46_1]]
-; THR15-NEXT: [[ADD55_1:%.*]] = add i32 [[SUB47_1]], [[SUB45_1]]
-; THR15-NEXT: [[SUB59_1:%.*]] = sub i32 [[SUB45_1]], [[SUB47_1]]
-; THR15-NEXT: [[SHR_I54:%.*]] = lshr i32 [[ADD46_1]], 15
-; THR15-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
-; THR15-NEXT: [[MUL_I56:%.*]] = mul i32 [[AND_I55]], 65535
-; THR15-NEXT: [[SHR_I54_1:%.*]] = lshr i32 [[SUB47_1]], 15
+; THR15-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP119]], [[TMP118]]
+; THR15-NEXT: [[SUB51_1:%.*]] = sub i32 [[TMP118]], [[TMP119]]
+; THR15-NEXT: [[TMP129:%.*]] = extractelement <2 x i32> [[TMP121]], i32 0
+; THR15-NEXT: [[TMP125:%.*]] = extractelement <2 x i32> [[TMP121]], i32 1
+; THR15-NEXT: [[ADD55_4:%.*]] = add i32 [[TMP125]], [[TMP129]]
+; THR15-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP129]], [[TMP125]]
+; THR15-NEXT: [[SHR_I54_1:%.*]] = lshr i32 [[TMP119]], 15
; THR15-NEXT: [[AND_I55_1:%.*]] = and i32 [[SHR_I54_1]], 65537
; THR15-NEXT: [[MUL_I56_1:%.*]] = mul i32 [[AND_I55_1]], 65535
-; THR15-NEXT: [[TMP122:%.*]] = lshr <2 x i32> [[TMP94]], <i32 15, i32 15>
+; THR15-NEXT: [[TMP122:%.*]] = lshr <2 x i32> [[TMP103]], <i32 15, i32 15>
; THR15-NEXT: [[TMP123:%.*]] = and <2 x i32> [[TMP122]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP124:%.*]] = mul <2 x i32> [[TMP123]], <i32 65535, i32 65535>
; THR15-NEXT: [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
@@ -593,20 +552,20 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP63]]
; THR15-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51]], [[ADD105]]
; THR15-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
-; THR15-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
-; THR15-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[ADD46_1]]
+; THR15-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56_1]], [[SUB104]]
+; THR15-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP119]]
; THR15-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
-; THR15-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[ADD46]]
+; THR15-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP90]]
; THR15-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; THR15-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
; THR15-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
-; THR15-NEXT: [[TMP125:%.*]] = insertelement <2 x i32> poison, i32 [[ADD55_2]], i32 0
-; THR15-NEXT: [[TMP126:%.*]] = shufflevector <2 x i32> [[TMP125]], <2 x i32> poison, <2 x i32> zeroinitializer
-; THR15-NEXT: [[TMP127:%.*]] = insertelement <2 x i32> poison, i32 [[ADD55_3]], i32 0
-; THR15-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP127]], <2 x i32> poison, <2 x i32> zeroinitializer
-; THR15-NEXT: [[TMP129:%.*]] = sub <2 x i32> [[TMP126]], [[TMP128]]
-; THR15-NEXT: [[TMP130:%.*]] = add <2 x i32> [[TMP126]], [[TMP128]]
-; THR15-NEXT: [[TMP131:%.*]] = shufflevector <2 x i32> [[TMP129]], <2 x i32> [[TMP130]], <2 x i32> <i32 0, i32 3>
+; THR15-NEXT: [[ADD55:%.*]] = add i32 [[ADD55_4]], [[ADD56]]
+; THR15-NEXT: [[SUB86_1:%.*]] = sub i32 [[ADD56]], [[ADD55_4]]
+; THR15-NEXT: [[ADD105_1:%.*]] = add i32 [[SUB102_1]], [[SUB86_1]]
+; THR15-NEXT: [[SUB106_1:%.*]] = sub i32 [[SUB86_1]], [[SUB102_1]]
+; THR15-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_1]]
+; THR15-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP64]]
+; THR15-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP66]], <2 x i32> [[TMP121]], <2 x i32> <i32 1, i32 3>
; THR15-NEXT: [[TMP132:%.*]] = lshr <2 x i32> [[TMP5]], <i32 15, i32 15>
; THR15-NEXT: [[TMP133:%.*]] = and <2 x i32> [[TMP132]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP134:%.*]] = mul <2 x i32> [[TMP133]], <i32 65535, i32 65535>
@@ -614,28 +573,18 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[TMP136:%.*]] = shufflevector <2 x i32> [[TMP135]], <2 x i32> poison, <2 x i32> zeroinitializer
; THR15-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> poison, i32 [[ADD55_1]], i32 0
; THR15-NEXT: [[TMP138:%.*]] = shufflevector <2 x i32> [[TMP137]], <2 x i32> poison, <2 x i32> zeroinitializer
-; THR15-NEXT: [[TMP139:%.*]] = sub <2 x i32> [[TMP136]], [[TMP138]]
; THR15-NEXT: [[TMP140:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
-; THR15-NEXT: [[TMP141:%.*]] = shufflevector <2 x i32> [[TMP139]], <2 x i32> [[TMP140]], <2 x i32> <i32 0, i32 3>
-; THR15-NEXT: [[TMP142:%.*]] = extractelement <2 x i32> [[TMP131]], i32 1
-; THR15-NEXT: [[TMP143:%.*]] = extractelement <2 x i32> [[TMP141]], i32 1
-; THR15-NEXT: [[SUB104_1:%.*]] = sub i32 [[TMP143]], [[TMP142]]
-; THR15-NEXT: [[TMP144:%.*]] = add <2 x i32> [[TMP131]], [[TMP141]]
-; THR15-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP131]], i32 0
-; THR15-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP141]], i32 0
-; THR15-NEXT: [[TMP147:%.*]] = shufflevector <2 x i32> [[TMP141]], <2 x i32> [[TMP131]], <2 x i32> <i32 0, i32 2>
-; THR15-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP146]], [[TMP145]]
+; THR15-NEXT: [[TMP139:%.*]] = sub <2 x i32> [[TMP136]], [[TMP138]]
+; THR15-NEXT: [[TMP144:%.*]] = shufflevector <2 x i32> [[TMP140]], <2 x i32> [[TMP139]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[TMP148:%.*]] = add <2 x i32> [[TMP134]], [[TMP144]]
; THR15-NEXT: [[TMP149:%.*]] = xor <2 x i32> [[TMP148]], [[TMP5]]
-; THR15-NEXT: [[ADD_I57_1:%.*]] = add i32 [[MUL_I56_1]], [[SUB104_1]]
-; THR15-NEXT: [[XOR_I58_1:%.*]] = xor i32 [[ADD_I57_1]], [[SUB47_1]]
; THR15-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
; THR15-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[SUB47]]
+; THR15-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
; THR15-NEXT: [[TMP150:%.*]] = extractelement <2 x i32> [[TMP149]], i32 0
-; THR15-NEXT: [[ADD108_1:%.*]] = add i32 [[TMP150]], [[ADD113]]
+; THR15-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP150]]
; THR15-NEXT: [[TMP151:%.*]] = extractelement <2 x i32> [[TMP149]], i32 1
-; THR15-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP151]]
-; THR15-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[XOR_I58_1]]
+; THR15-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP151]]
; THR15-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
; THR15-NEXT: [[ADD78_2:%.*]] = add i32 [[SUB51_1]], [[SUB51]]
; THR15-NEXT: [[SUB86_2:%.*]] = sub i32 [[SUB51]], [[SUB51_1]]
@@ -648,15 +597,15 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[TMP158:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP157]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[ADD105_2:%.*]] = add i32 [[SUB102_2]], [[SUB86_2]]
; THR15-NEXT: [[SUB106_2:%.*]] = sub i32 [[SUB86_2]], [[SUB102_2]]
-; THR15-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_2]]
+; THR15-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_2]]
; THR15-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
; THR15-NEXT: [[TMP159:%.*]] = add <2 x i32> [[TMP124]], [[TMP158]]
-; THR15-NEXT: [[TMP160:%.*]] = xor <2 x i32> [[TMP159]], [[TMP94]]
-; THR15-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[ADD44]], 15
+; THR15-NEXT: [[TMP160:%.*]] = xor <2 x i32> [[TMP159]], [[TMP103]]
+; THR15-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP89]], 15
; THR15-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
; THR15-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
; THR15-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
-; THR15-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[ADD44]]
+; THR15-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP89]]
; THR15-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
; THR15-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP160]], i32 0
; THR15-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP161]]
@@ -674,13 +623,13 @@ define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.pt
; THR15-NEXT: [[TMP169:%.*]] = shufflevector <2 x i32> [[TMP167]], <2 x i32> [[TMP168]], <2 x i32> <i32 0, i32 3>
; THR15-NEXT: [[ADD105_3:%.*]] = add i32 [[SUB102_3]], [[SUB86_3]]
; THR15-NEXT: [[SUB106_3:%.*]] = sub i32 [[SUB86_3]], [[SUB102_3]]
-; THR15-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_3]]
+; THR15-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_4]], [[ADD105_3]]
; THR15-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV]]
-; THR15-NEXT: [[TMP170:%.*]] = lshr <2 x i32> [[TMP65]], <i32 15, i32 15>
+; THR15-NEXT: [[TMP170:%.*]] = lshr <2 x i32> [[TMP74]], <i32 15, i32 15>
; THR15-NEXT: [[TMP171:%.*]] = and <2 x i32> [[TMP170]], <i32 65537, i32 65537>
; THR15-NEXT: [[TMP172:%.*]] = mul <2 x i32> [[TMP171]], <i32 65535, i32 65535>
; THR15-NEXT: [[TMP173:%.*]] = add <2 x i32> [[TMP172]], [[TMP169]]
-; THR15-NEXT: [[TMP174:%.*]] = xor <2 x i32> [[TMP173]], [[TMP65]]
+; THR15-NEXT: [[TMP174:%.*]] = xor <2 x i32> [[TMP173]], [[TMP74]]
; THR15-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
; THR15-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
; THR15-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-use-ptr.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-use-ptr.ll
index ec152c7..b47168c 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-use-ptr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-use-ptr.ll
@@ -8,16 +8,17 @@ define i16 @test() {
; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PPREV_058_I:%.*]] = getelementptr [[S:%.*]], ptr null, i64 -1
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x ptr> <ptr null, ptr poison>, ptr [[PPREV_058_I]], i32 1
; CHECK-NEXT: br label [[WHILE_BODY_I:%.*]]
; CHECK: while.body.i:
-; CHECK-NEXT: [[TMP1:%.*]] = phi i16 [ 0, [[WHILE_BODY_I]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x ptr> [ [[TMP3:%.*]], [[WHILE_BODY_I]] ], [ [[TMP0]], [[ENTRY]] ]
-; CHECK-NEXT: [[TMP3]] = getelementptr [[S]], <2 x ptr> [[TMP2]], <2 x i64> <i64 -1, i64 -1>
-; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr> [[TMP3]], i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> poison)
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i16> [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP4]], i32 1
-; CHECK-NEXT: [[CMP_I178:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
+; CHECK-NEXT: [[TMP0:%.*]] = phi i16 [ 0, [[WHILE_BODY_I]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[PPREV_062_I:%.*]] = phi ptr [ [[PPREV_0_I:%.*]], [[WHILE_BODY_I]] ], [ [[PPREV_058_I]], [[ENTRY]] ]
+; CHECK-NEXT: [[PEDGE_061_I:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[WHILE_BODY_I]] ], [ null, [[ENTRY]] ]
+; CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr [[S]], ptr [[PEDGE_061_I]], i64 -1
+; CHECK-NEXT: [[PPREV_0_I]] = getelementptr [[S]], ptr [[PPREV_062_I]], i64 -1
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i64(ptr align 2 [[PPREV_0_I]], i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
+; CHECK-NEXT: [[CMP_I178:%.*]] = icmp ult i16 [[TMP3]], [[TMP2]]
; CHECK-NEXT: br label [[WHILE_BODY_I]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR32086.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR32086.ll
index 66bfe05..473b371 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/PR32086.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/PR32086.ll
@@ -3,9 +3,9 @@
define void @i64_simplified(ptr noalias %st, ptr noalias %ld) {
; CHECK-LABEL: @i64_simplified(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr [[LD:%.*]], align 8
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
-; CHECK-NEXT: store <4 x i64> [[SHUFFLE]], ptr [[ST:%.*]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[LD:%.*]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+; CHECK-NEXT: store <4 x i64> [[TMP2]], ptr [[ST:%.*]], align 8
; CHECK-NEXT: ret void
;
%arrayidx1 = getelementptr inbounds i64, ptr %ld, i64 1
@@ -26,9 +26,9 @@ define void @i64_simplified(ptr noalias %st, ptr noalias %ld) {
define void @i64_simplifiedi_reversed(ptr noalias %st, ptr noalias %ld) {
; CHECK-LABEL: @i64_simplifiedi_reversed(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr [[LD:%.*]], align 8
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
-; CHECK-NEXT: store <4 x i64> [[SHUFFLE]], ptr [[ST:%.*]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[LD:%.*]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
+; CHECK-NEXT: store <4 x i64> [[TMP2]], ptr [[ST:%.*]], align 8
; CHECK-NEXT: ret void
;
%arrayidx1 = getelementptr inbounds i64, ptr %ld, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
index 5f8941e..f7bd243 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -65,6 +65,62 @@ entry:
ret void
}
+define void @addsub_freeze() #0 {
+; CHECK-LABEL: @addsub_freeze(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @b, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @c, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @d, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @e, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: [[TMP9:%.*]] = freeze <4 x i32> [[TMP8]]
+; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr @a, align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, ptr @b, align 4
+ %1 = load i32, ptr @c, align 4
+ %add = add nsw i32 %0, %1
+ %2 = load i32, ptr @d, align 4
+ %3 = load i32, ptr @e, align 4
+ %add1 = add nsw i32 %2, %3
+ %add2 = add nsw i32 %add, %add1
+ %freeze.add2 = freeze i32 %add2
+ store i32 %freeze.add2, ptr @a, align 4
+ %4 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i32 0, i64 1), align 4
+ %5 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i32 0, i64 1), align 4
+ %add3 = add nsw i32 %4, %5
+ %6 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i32 0, i64 1), align 4
+ %7 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i32 0, i64 1), align 4
+ %add4 = add nsw i32 %6, %7
+ %sub = sub nsw i32 %add3, %add4
+ %freeze.sub = freeze i32 %sub
+ store i32 %freeze.sub, ptr getelementptr inbounds ([4 x i32], ptr @a, i32 0, i64 1), align 4
+ %8 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i32 0, i64 2), align 4
+ %9 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i32 0, i64 2), align 4
+ %add5 = add nsw i32 %8, %9
+ %10 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i32 0, i64 2), align 4
+ %11 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i32 0, i64 2), align 4
+ %add6 = add nsw i32 %10, %11
+ %add7 = add nsw i32 %add5, %add6
+ %freeze.add7 = freeze i32 %add7
+ store i32 %freeze.add7, ptr getelementptr inbounds ([4 x i32], ptr @a, i32 0, i64 2), align 4
+ %12 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @b, i32 0, i64 3), align 4
+ %13 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @c, i32 0, i64 3), align 4
+ %add8 = add nsw i32 %12, %13
+ %14 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @d, i32 0, i64 3), align 4
+ %15 = load i32, ptr getelementptr inbounds ([4 x i32], ptr @e, i32 0, i64 3), align 4
+ %add9 = add nsw i32 %14, %15
+ %sub10 = sub nsw i32 %add8, %add9
+ %freeze.sub10 = freeze i32 %sub10
+ store i32 %freeze.sub10, ptr getelementptr inbounds ([4 x i32], ptr @a, i32 0, i64 3), align 4
+ ret void
+}
+
; Function Attrs: nounwind uwtable
define void @subadd() #0 {
; CHECK-LABEL: @subadd(
@@ -301,14 +357,14 @@ define void @reorder_alt_subTree() #0 {
define void @reorder_alt_rightsubTree(ptr nocapture %c, ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %d) {
; CHECK-LABEL: @reorder_alt_rightsubTree(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[D:%.*]], align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
-; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP4]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = fsub <2 x double> [[TMP7]], [[TMP2]]
-; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[TMP7]], [[TMP2]]
-; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x double> [[TMP8]], <2 x double> [[TMP9]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: store <2 x double> [[TMP10]], ptr [[C:%.*]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr [[D:%.*]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = fsub <2 x double> [[TMP4]], [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[TMP4]], [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> [[TMP6]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: store <2 x double> [[TMP7]], ptr [[C:%.*]], align 8
; CHECK-NEXT: ret void
;
%1 = load double, ptr %a
@@ -332,20 +388,20 @@ define void @reorder_alt_rightsubTree(ptr nocapture %c, ptr noalias nocapture re
define void @vec_shuff_reorder() #0 {
; CHECK-LABEL: @vec_shuff_reorder(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr @fa, align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x float>, ptr @fb, align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fb, i32 0, i64 2), align 4
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr @fa, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr @fb, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fb, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr getelementptr inbounds ([4 x float], ptr @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP5]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x float> [[TMP12]], <4 x float> [[TMP13]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT: [[TMP15:%.*]] = fadd <4 x float> [[TMP10]], [[TMP14]]
-; CHECK-NEXT: [[TMP16:%.*]] = fsub <4 x float> [[TMP10]], [[TMP14]]
-; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <4 x float> [[TMP15]], <4 x float> [[TMP16]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-NEXT: store <4 x float> [[TMP17]], ptr @fc, align 4
+; CHECK-NEXT: [[TMP11:%.*]] = fadd <4 x float> [[TMP7]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = fsub <4 x float> [[TMP7]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x float> [[TMP11]], <4 x float> [[TMP12]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x float> [[TMP13]], ptr @fc, align 4
; CHECK-NEXT: ret void
;
%1 = load float, ptr @fb, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp-diff-sized.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp-diff-sized.ll
new file mode 100644
index 0000000..c8bd106
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp-diff-sized.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64--- | FileCheck %s
+
+define void @test(ptr noalias %a, ptr %b) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: [[PA1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i32 64
+; CHECK-NEXT: [[A1:%.*]] = load i64, ptr [[PA1]], align 8
+; CHECK-NEXT: [[PB1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i32 64
+; CHECK-NEXT: [[B1:%.*]] = load i64, ptr [[PB1]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[A]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[B]], align 4
+; CHECK-NEXT: [[C1:%.*]] = icmp eq i64 [[B1]], [[A1]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <2 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret void
+;
+ %pa1 = getelementptr inbounds i64, ptr %a, i32 64
+ %pa2 = getelementptr inbounds i32, ptr %a, i32 1
+ %a0 = load i32, ptr %a, align 4
+ %a1 = load i64, ptr %pa1, align 8
+ %a2 = load i32, ptr %pa2, align 4
+ %pb1 = getelementptr inbounds i64, ptr %b, i32 64
+ %pb2 = getelementptr inbounds i32, ptr %b, i32 1
+ %b0 = load i32, ptr %b, align 4
+ %b1 = load i64, ptr %pb1, align 8
+ %b2 = load i32, ptr %pb2, align 4
+ %c0 = icmp eq i32 %a0, %b0
+ %c1 = icmp eq i64 %b1, %a1
+ %c2 = icmp eq i32 %b2, %a2
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extractelement-phi-in-landingpad.ll b/llvm/test/Transforms/SLPVectorizer/X86/extractelement-phi-in-landingpad.ll
new file mode 100644
index 0000000..7476c77
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/extractelement-phi-in-landingpad.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @test() personality ptr null {
+; CHECK-LABEL: define void @test() personality ptr null {
+; CHECK-NEXT: [[BB:.*]]:
+; CHECK-NEXT: invoke void null()
+; CHECK-NEXT: to label %[[BB65:.*]] unwind label %[[BB4:.*]]
+; CHECK: [[BB2:.*]]:
+; CHECK-NEXT: invoke void null()
+; CHECK-NEXT: to label %[[BB65]] unwind label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ zeroinitializer, %[[BB]] ], [ poison, %[[BB2]] ]
+; CHECK-NEXT: [[LANDINGPAD:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: call void null(i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]], i32 [[TMP1]])
+; CHECK-NEXT: ret void
+; CHECK: [[BB65]]:
+; CHECK-NEXT: ret void
+;
+bb:
+ invoke void null()
+ to label %bb65 unwind label %bb4
+
+bb2:
+ invoke void null()
+ to label %bb65 unwind label %bb4
+
+bb4:
+ %phi5 = phi i32 [ 0, %bb ], [ 0, %bb2 ]
+ %phi6 = phi i32 [ 0, %bb ], [ 0, %bb2 ]
+ %landingpad = landingpad { ptr, i32 }
+ cleanup
+ call void null(i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5, i32 %phi5)
+ ret void
+
+bb65:
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fmuladd.ll b/llvm/test/Transforms/SLPVectorizer/X86/fmuladd.ll
index 28e837c..1804ef5 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/fmuladd.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fmuladd.ll
@@ -46,6 +46,31 @@ define void @fmuladd_2f64() #0 {
ret void
}
+define void @fmuladd_2f64_freeze() #0 {
+; CHECK-LABEL: @fmuladd_2f64_freeze(
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr @srcC64, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]], <2 x double> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = freeze <2 x double> [[TMP4]]
+; CHECK-NEXT: store <2 x double> [[TMP5]], ptr @dst64, align 8
+; CHECK-NEXT: ret void
+;
+ %a0 = load double, ptr @srcA64, align 8
+ %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
+ %b0 = load double, ptr @srcB64, align 8
+ %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
+ %c0 = load double, ptr @srcC64, align 8
+ %c1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcC64, i32 0, i64 1), align 8
+ %fmuladd0 = call double @llvm.fmuladd.f64(double %a0, double %b0, double %c0)
+ %fmuladd1 = call double @llvm.fmuladd.f64(double %a1, double %b1, double %c1)
+ %freeze0 = freeze double %fmuladd0
+ %freeze1 = freeze double %fmuladd1
+ store double %freeze0, ptr @dst64, align 8
+ store double %freeze1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
+ ret void
+}
+
define void @fmuladd_4f64() #0 {
; SSE-LABEL: @fmuladd_4f64(
; SSE-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/landing_pad.ll b/llvm/test/Transforms/SLPVectorizer/X86/landing_pad.ll
index f795fef..47b42bc 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/landing_pad.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/landing_pad.ll
@@ -10,12 +10,12 @@ define void @foo() personality ptr @bar {
; CHECK: bb2.loopexit:
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x i32> [ [[TMP8:%.*]], [[BB9:%.*]] ], [ poison, [[BB2_LOOPEXIT:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x i32> [ [[TMP7:%.*]], [[BB9:%.*]] ], [ poison, [[BB2_LOOPEXIT:%.*]] ]
; CHECK-NEXT: ret void
; CHECK: bb3:
; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP3:%.*]], [[BB6:%.*]] ], [ poison, [[BB1:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = invoke i32 poison(ptr addrspace(1) nonnull poison, i32 0, i32 0, i32 poison) [ "deopt"() ]
-; CHECK-NEXT: to label [[BB4:%.*]] unwind label [[BB10:%.*]]
+; CHECK-NEXT: to label [[BB4:%.*]] unwind label [[BB10:%.*]]
; CHECK: bb4:
; CHECK-NEXT: br i1 poison, label [[BB11:%.*]], label [[BB5:%.*]]
; CHECK: bb5:
@@ -27,26 +27,25 @@ define void @foo() personality ptr @bar {
; CHECK-NEXT: [[LOCAL_5_84111:%.*]] = phi i32 [ poison, [[BB8]] ], [ poison, [[BB5]] ]
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i32> poison, i32 [[LOCAL_5_84111]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = invoke i32 poison(ptr addrspace(1) nonnull poison, i32 poison, i32 poison, i32 poison) [ "deopt"() ]
-; CHECK-NEXT: to label [[BB8]] unwind label [[BB12:%.*]]
+; CHECK-NEXT: to label [[BB8]] unwind label [[BB12:%.*]]
; CHECK: bb8:
; CHECK-NEXT: br i1 poison, label [[BB7]], label [[BB6]]
; CHECK: bb9:
; CHECK-NEXT: [[INDVARS_IV528799:%.*]] = phi i64 [ poison, [[BB10]] ], [ poison, [[BB12]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = phi <2 x i32> [ [[TMP9:%.*]], [[BB10]] ], [ [[TMP10:%.*]], [[BB12]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP8]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> poison, <4 x i32> <i32 poison, i32 poison, i32 0, i32 1>
+; CHECK-NEXT: [[TMP6:%.*]] = phi <2 x i32> [ [[TMP8:%.*]], [[BB10]] ], [ [[TMP9:%.*]], [[BB12]] ]
+; CHECK-NEXT: [[TMP7]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> poison, <4 x i32> <i32 poison, i32 poison, i32 1, i32 0>
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb10:
-; CHECK-NEXT: [[TMP9]] = phi <2 x i32> [ [[TMP1]], [[BB3]] ]
+; CHECK-NEXT: [[TMP8]] = phi <2 x i32> [ [[TMP1]], [[BB3]] ]
; CHECK-NEXT: [[LANDING_PAD68:%.*]] = landingpad { ptr, i32 }
-; CHECK-NEXT: cleanup
+; CHECK-NEXT: cleanup
; CHECK-NEXT: br label [[BB9]]
; CHECK: bb11:
; CHECK-NEXT: ret void
; CHECK: bb12:
-; CHECK-NEXT: [[TMP10]] = phi <2 x i32> [ [[TMP4]], [[BB7]] ]
+; CHECK-NEXT: [[TMP9]] = phi <2 x i32> [ [[TMP4]], [[BB7]] ]
; CHECK-NEXT: [[LANDING_PAD149:%.*]] = landingpad { ptr, i32 }
-; CHECK-NEXT: cleanup
+; CHECK-NEXT: cleanup
; CHECK-NEXT: br label [[BB9]]
;
bb1:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-replace-extractelement.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-replace-extractelement.ll
new file mode 100644
index 0000000..edf8756
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-replace-extractelement.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-99999 < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: define void @test() {
+; CHECK-NEXT: [[BB:.*]]:
+; CHECK-NEXT: br label %[[BB1:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ zeroinitializer, %[[BB]] ], [ [[TMP4:%.*]], %[[BB1]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> zeroinitializer)
+; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i32
+; CHECK-NEXT: [[OP_RDX:%.*]] = mul i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = mul i32 [[OP_RDX]], 0
+; CHECK-NEXT: [[TMP4]] = insertelement <2 x i32> <i32 0, i32 poison>, i32 [[OP_RDX1]], i32 1
+; CHECK-NEXT: br label %[[BB1]]
+;
+bb:
+ br label %bb1
+
+bb1:
+ %phi = phi i32 [ 0, %bb ], [ %mul9, %bb1 ]
+ %phi2 = phi i32 [ 0, %bb ], [ 0, %bb1 ]
+ %trunc = trunc i64 0 to i32
+ %mul = mul i32 0, %trunc
+ %mul3 = mul i32 %trunc, %phi
+ %mul4 = mul i32 %mul3, %mul
+ %mul5 = mul i32 %mul4, %mul
+ %trunc6 = trunc i64 0 to i32
+ %mul7 = mul i32 0, %trunc6
+ %mul8 = mul i32 %mul5, %mul7
+ %mul9 = mul i32 %mul8, %mul7
+ br label %bb1
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll
index e8abcce..03c8767 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll
@@ -10,7 +10,8 @@ define void @tes() {
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i1> zeroinitializer, <2 x i1> [[TMP0]], <4 x i32> <i32 0, i32 0, i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> [[TMP3]])
; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 false, i1 [[TMP4]], i1 false
-; CHECK-NEXT: br i1 [[OP_RDX]], label [[TMP6:%.*]], label [[TMP5:%.*]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 false, i1 [[OP_RDX]], i1 false
+; CHECK-NEXT: br i1 [[OP_RDX1]], label [[TMP6:%.*]], label [[TMP5:%.*]]
; CHECK: 4:
; CHECK-NEXT: ret void
; CHECK: 5:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/stores_mix_sizes.ll b/llvm/test/Transforms/SLPVectorizer/X86/stores_mix_sizes.ll
index 1e2a87b..3795b0e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/stores_mix_sizes.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/stores_mix_sizes.ll
@@ -5,17 +5,9 @@ define void @test(ptr %p) {
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[IDX1:%.*]] = getelementptr i8, ptr [[P]], i64 1
-; CHECK-NEXT: store i8 0, ptr [[IDX1]], align 4
; CHECK-NEXT: [[IDX_64_9:%.*]] = getelementptr i64, ptr [[P]], i64 9
; CHECK-NEXT: store i64 1, ptr [[IDX_64_9]], align 8
-; CHECK-NEXT: [[IDX2:%.*]] = getelementptr i8, ptr [[P]], i64 2
-; CHECK-NEXT: store <4 x i8> zeroinitializer, ptr [[IDX2]], align 4
-; CHECK-NEXT: [[IDX6:%.*]] = getelementptr i8, ptr [[P]], i64 6
-; CHECK-NEXT: store i8 0, ptr [[IDX6]], align 4
-; CHECK-NEXT: [[IDX7:%.*]] = getelementptr i8, ptr [[P]], i64 7
-; CHECK-NEXT: store i8 0, ptr [[IDX7]], align 4
-; CHECK-NEXT: [[IDX8:%.*]] = getelementptr i8, ptr [[P]], i64 8
-; CHECK-NEXT: store i8 0, ptr [[IDX8]], align 4
+; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[IDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/revec.ll b/llvm/test/Transforms/SLPVectorizer/revec.ll
index a6e10611..d6dd412 100644
--- a/llvm/test/Transforms/SLPVectorizer/revec.ll
+++ b/llvm/test/Transforms/SLPVectorizer/revec.ll
@@ -88,3 +88,39 @@ entry:
store <4 x i32> %9, ptr %10, align 4
ret void
}
+
+define void @test4(ptr %in, ptr %out) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x float>, ptr [[IN:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> poison, <8 x float> poison, i64 8)
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> [[TMP1]], <8 x float> [[TMP0]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[TMP2]], <16 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP4:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> poison, <8 x float> zeroinitializer, i64 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> [[TMP4]], <8 x float> zeroinitializer, i64 8)
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <16 x float> [[TMP3]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> poison, <8 x float> poison, i64 0)
+; CHECK-NEXT: [[TMP8:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> [[TMP7]], <8 x float> zeroinitializer, i64 8)
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x float> [[TMP2]], <16 x float> [[TMP8]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <16 x float> [[TMP9]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = fcmp ogt <16 x float> [[TMP10]], [[TMP5]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i1, ptr [[OUT:%.*]], i64 8
+; CHECK-NEXT: [[TMP13:%.*]] = call <8 x i1> @llvm.vector.extract.v8i1.v16i1(<16 x i1> [[TMP11]], i64 8)
+; CHECK-NEXT: store <8 x i1> [[TMP13]], ptr [[OUT]], align 1
+; CHECK-NEXT: [[TMP14:%.*]] = call <8 x i1> @llvm.vector.extract.v8i1.v16i1(<16 x i1> [[TMP11]], i64 0)
+; CHECK-NEXT: store <8 x i1> [[TMP14]], ptr [[TMP12]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load <8 x float>, ptr %in, align 4
+ %1 = fmul <8 x float> %0, zeroinitializer
+ %2 = fmul <8 x float> %0, zeroinitializer
+ %3 = fadd <8 x float> zeroinitializer, %1
+ %4 = fadd <8 x float> %0, %2
+ %5 = fcmp ogt <8 x float> %3, zeroinitializer
+ %6 = fcmp ogt <8 x float> %4, zeroinitializer
+ %7 = getelementptr i1, ptr %out, i64 8
+ store <8 x i1> %5, ptr %out, align 1
+ store <8 x i1> %6, ptr %7, align 1
+ ret void
+}
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
index be2f17dc..0150b3b 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
@@ -1846,6 +1846,193 @@ join:
ret i64 %phi1
}
+define i64 @load_with_sunk_gep(i1 %cond, ptr %p, i64 %a, i64 %b) {
+; CHECK-LABEL: @load_with_sunk_gep(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[B_SINK:%.*]] = phi i64 [ [[A:%.*]], [[IF]] ], [ [[B:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[B_SINK]]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B]], align 8
+; CHECK-NEXT: ret i64 [[V_B]]
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %gep.a = getelementptr i8, ptr %p, i64 %a
+ %v.a = load i64, ptr %gep.a
+ br label %join
+
+else:
+ %gep.b = getelementptr i8, ptr %p, i64 %b
+ %v.b = load i64, ptr %gep.b
+ br label %join
+
+join:
+ %v = phi i64 [ %v.a, %if ], [ %v.b, %else ]
+ ret i64 %v
+}
+
+define i64 @load_with_non_sunk_gep_both(i1 %cond, ptr %p.a, ptr %p.b, i64 %a, i64 %b) {
+; CHECK-LABEL: @load_with_non_sunk_gep_both(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: br label [[JOIN:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[GEP_A]], [[IF]] ]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B_SINK]], align 8
+; CHECK-NEXT: ret i64 [[V_B]]
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %gep.a = getelementptr i8, ptr %p.a, i64 %a
+ %v.a = load i64, ptr %gep.a
+ br label %join
+
+else:
+ %gep.b = getelementptr i8, ptr %p.b, i64 %b
+ %v.b = load i64, ptr %gep.b
+ br label %join
+
+join:
+ %v = phi i64 [ %v.a, %if ], [ %v.b, %else ]
+ ret i64 %v
+}
+
+define i64 @load_with_non_sunk_gep_left(i1 %cond, ptr %p.a, ptr %p.b, i64 %b) {
+; CHECK-LABEL: @load_with_non_sunk_gep_left(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: br label [[JOIN:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[P_A:%.*]], [[IF]] ]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B_SINK]], align 8
+; CHECK-NEXT: ret i64 [[V_B]]
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %v.a = load i64, ptr %p.a
+ br label %join
+
+else:
+ %gep.b = getelementptr i8, ptr %p.b, i64 %b
+ %v.b = load i64, ptr %gep.b
+ br label %join
+
+join:
+ %v = phi i64 [ %v.a, %if ], [ %v.b, %else ]
+ ret i64 %v
+}
+
+define i64 @load_with_non_sunk_gep_right(i1 %cond, ptr %p.a, ptr %p.b, i64 %a) {
+; CHECK-LABEL: @load_with_non_sunk_gep_right(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[P_B_SINK:%.*]] = phi ptr [ [[GEP_A]], [[IF]] ], [ [[P_B:%.*]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[P_B_SINK]], align 8
+; CHECK-NEXT: ret i64 [[V_B]]
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %gep.a = getelementptr i8, ptr %p.a, i64 %a
+ %v.a = load i64, ptr %gep.a
+ br label %join
+
+else:
+ %v.b = load i64, ptr %p.b
+ br label %join
+
+join:
+ %v = phi i64 [ %v.a, %if ], [ %v.b, %else ]
+ ret i64 %v
+}
+
+define void @store_with_non_sunk_gep(i1 %cond, ptr %p.a, ptr %p.b, i64 %a, i64 %b) {
+; CHECK-LABEL: @store_with_non_sunk_gep(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: br label [[JOIN:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[GEP_A]], [[IF]] ]
+; CHECK-NEXT: store i64 0, ptr [[GEP_B_SINK]], align 8
+; CHECK-NEXT: ret void
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %gep.a = getelementptr i8, ptr %p.a, i64 %a
+ store i64 0, ptr %gep.a
+ br label %join
+
+else:
+ %gep.b = getelementptr i8, ptr %p.b, i64 %b
+ store i64 0, ptr %gep.b
+ br label %join
+
+join:
+ ret void
+}
+
+define void @store_with_non_sunk_gep_as_value(i1 %cond, ptr %p, ptr %p.a, ptr %p.b, i64 %a, i64 %b) {
+; CHECK-LABEL: @store_with_non_sunk_gep_as_value(
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
+; CHECK: if:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: br label [[JOIN:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[GEP_A]], [[IF]] ]
+; CHECK-NEXT: store ptr [[GEP_B_SINK]], ptr [[P:%.*]], align 8
+; CHECK-NEXT: ret void
+;
+ br i1 %cond, label %if, label %else
+
+if:
+ call void @dummy()
+ %gep.a = getelementptr i8, ptr %p.a, i64 %a
+ store ptr %gep.a, ptr %p
+ br label %join
+
+else:
+ %gep.b = getelementptr i8, ptr %p.b, i64 %b
+ store ptr %gep.b, ptr %p
+ br label %join
+
+join:
+ ret void
+}
+
declare void @dummy()
declare void @use.ptr(ptr)
diff --git a/llvm/test/Transforms/StructurizeCFG/loop-break-phi.ll b/llvm/test/Transforms/StructurizeCFG/loop-break-phi.ll
new file mode 100644
index 0000000..c832b7d
--- /dev/null
+++ b/llvm/test/Transforms/StructurizeCFG/loop-break-phi.ll
@@ -0,0 +1,213 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes='structurizecfg' %s -o - | FileCheck %s
+
+define float @while_break(i32 %z, float %v, i32 %x, i32 %y) #0 {
+; CHECK-LABEL: define float @while_break(
+; CHECK-SAME: i32 [[Z:%.*]], float [[V:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[HEADER:.*]]
+; CHECK: [[HEADER]]:
+; CHECK-NEXT: [[V_1:%.*]] = phi float [ [[V]], %[[ENTRY]] ], [ [[TMP7:%.*]], %[[FLOW2:.*]] ]
+; CHECK-NEXT: [[IND:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP6:%.*]], %[[FLOW2]] ]
+; CHECK-NEXT: [[CC:%.*]] = icmp sge i32 [[IND]], [[X]]
+; CHECK-NEXT: br i1 [[CC]], label %[[ELSE:.*]], label %[[FLOW:.*]]
+; CHECK: [[FLOW]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[V_1]], %[[ELSE]] ], [ undef, %[[HEADER]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi i1 [ [[CC2:%.*]], %[[ELSE]] ], [ false, %[[HEADER]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ false, %[[ELSE]] ], [ true, %[[HEADER]] ]
+; CHECK-NEXT: br i1 [[TMP2]], label %[[IF:.*]], label %[[FLOW1:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[V_IF:%.*]] = fadd float [[V_1]], 1.000000e+00
+; CHECK-NEXT: br label %[[FLOW1]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[CC2]] = icmp slt i32 [[IND]], [[Y]]
+; CHECK-NEXT: br label %[[FLOW]]
+; CHECK: [[FLOW1]]:
+; CHECK-NEXT: [[TMP3:%.*]] = phi float [ undef, %[[IF]] ], [ [[TMP0]], %[[FLOW]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi float [ [[V_IF]], %[[IF]] ], [ [[TMP0]], %[[FLOW]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi i1 [ true, %[[IF]] ], [ [[TMP1]], %[[FLOW]] ]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[LATCH:.*]], label %[[FLOW2]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IND_INC:%.*]] = add i32 [[IND]], 1
+; CHECK-NEXT: [[CC3:%.*]] = icmp slt i32 [[IND]], [[Z]]
+; CHECK-NEXT: br label %[[FLOW2]]
+; CHECK: [[FLOW2]]:
+; CHECK-NEXT: [[TMP6]] = phi i32 [ [[IND_INC]], %[[LATCH]] ], [ undef, %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP7]] = phi float [ [[TMP4]], %[[LATCH]] ], [ undef, %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = phi float [ [[TMP4]], %[[LATCH]] ], [ [[TMP3]], %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i1 [ [[CC3]], %[[LATCH]] ], [ true, %[[FLOW1]] ]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[END:.*]], label %[[HEADER]]
+; CHECK: [[END]]:
+; CHECK-NEXT: ret float [[TMP8]]
+;
+entry:
+ br label %header
+
+header:
+ %v.1 = phi float [ %v, %entry ], [ %v.2, %latch ]
+ %ind = phi i32 [ 0, %entry], [ %ind.inc, %latch ]
+ %cc = icmp slt i32 %ind, %x
+ br i1 %cc, label %if, label %else
+
+if:
+ %v.if = fadd float %v.1, 1.0
+ br label %latch
+
+else:
+ %cc2 = icmp slt i32 %ind, %y
+ br i1 %cc2, label %latch, label %end
+
+latch:
+ %v.2 = phi float [ %v.if, %if ], [ %v.1, %else ]
+ %ind.inc = add i32 %ind, 1
+ %cc3 = icmp slt i32 %ind, %z
+ br i1 %cc3, label %end, label %header
+
+end:
+ %r = phi float [ %v.2, %latch ], [ %v.1, %else ]
+ ret float %r
+}
+
+; Just different dfs order from while_break.
+define float @while_break2(i32 %z, float %v, i32 %x, i32 %y) #0 {
+; CHECK-LABEL: define float @while_break2(
+; CHECK-SAME: i32 [[Z:%.*]], float [[V:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[HEADER:.*]]
+; CHECK: [[HEADER]]:
+; CHECK-NEXT: [[V_1:%.*]] = phi float [ [[V]], %[[ENTRY]] ], [ [[TMP7:%.*]], %[[FLOW2:.*]] ]
+; CHECK-NEXT: [[IND:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP6:%.*]], %[[FLOW2]] ]
+; CHECK-NEXT: [[CC:%.*]] = icmp sge i32 [[IND]], [[X]]
+; CHECK-NEXT: br i1 [[CC]], label %[[IF:.*]], label %[[FLOW:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[V_IF:%.*]] = fadd float [[V_1]], 1.000000e+00
+; CHECK-NEXT: br label %[[FLOW]]
+; CHECK: [[FLOW]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[V_IF]], %[[IF]] ], [ undef, %[[HEADER]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi i1 [ true, %[[IF]] ], [ false, %[[HEADER]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ false, %[[IF]] ], [ true, %[[HEADER]] ]
+; CHECK-NEXT: br i1 [[TMP2]], label %[[ELSE:.*]], label %[[FLOW1:.*]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[CC2:%.*]] = icmp slt i32 [[IND]], [[Y]]
+; CHECK-NEXT: br label %[[FLOW1]]
+; CHECK: [[FLOW1]]:
+; CHECK-NEXT: [[TMP3:%.*]] = phi float [ [[V_1]], %[[ELSE]] ], [ undef, %[[FLOW]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi float [ [[V_1]], %[[ELSE]] ], [ [[TMP0]], %[[FLOW]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi i1 [ [[CC2]], %[[ELSE]] ], [ [[TMP1]], %[[FLOW]] ]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[LATCH:.*]], label %[[FLOW2]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IND_INC:%.*]] = add i32 [[IND]], 1
+; CHECK-NEXT: [[CC3:%.*]] = icmp slt i32 [[IND]], [[Z]]
+; CHECK-NEXT: br label %[[FLOW2]]
+; CHECK: [[FLOW2]]:
+; CHECK-NEXT: [[TMP6]] = phi i32 [ [[IND_INC]], %[[LATCH]] ], [ undef, %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP7]] = phi float [ [[TMP4]], %[[LATCH]] ], [ undef, %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = phi float [ [[TMP4]], %[[LATCH]] ], [ [[TMP3]], %[[FLOW1]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i1 [ [[CC3]], %[[LATCH]] ], [ true, %[[FLOW1]] ]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[END:.*]], label %[[HEADER]]
+; CHECK: [[END]]:
+; CHECK-NEXT: ret float [[TMP8]]
+;
+entry:
+ br label %header
+
+header:
+ %v.1 = phi float [ %v, %entry ], [ %v.2, %latch ]
+ %ind = phi i32 [ 0, %entry], [ %ind.inc, %latch ]
+ %cc = icmp slt i32 %ind, %x
+ br i1 %cc, label %else, label %if
+
+if:
+ %v.if = fadd float %v.1, 1.0
+ br label %latch
+
+else:
+ %cc2 = icmp slt i32 %ind, %y
+ br i1 %cc2, label %latch, label %end
+
+latch:
+ %v.2 = phi float [ %v.if, %if ], [ %v.1, %else ]
+ %ind.inc = add i32 %ind, 1
+ %cc3 = icmp slt i32 %ind, %z
+ br i1 %cc3, label %end, label %header
+
+end:
+ %r = phi float [ %v.2, %latch ], [ %v.1, %else ]
+ ret float %r
+}
+
+; Two chains of phi network that have the same value from %if block.
+define < 2 x float> @while_break_two_chains_of_phi(float %v, i32 %x, i32 %y, i32 %z, ptr addrspace(1) %p) #0 {
+; CHECK-LABEL: define <2 x float> @while_break_two_chains_of_phi(
+; CHECK-SAME: float [[V:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]], i32 [[Z:%.*]], ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[HEADER:.*]]
+; CHECK: [[HEADER]]:
+; CHECK-NEXT: [[V_1:%.*]] = phi float [ [[V]], %[[ENTRY]] ], [ [[TMP6:%.*]], %[[FLOW1:.*]] ]
+; CHECK-NEXT: [[V_COPY:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP5:%.*]], %[[FLOW1]] ]
+; CHECK-NEXT: [[IND:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP4:%.*]], %[[FLOW1]] ]
+; CHECK-NEXT: [[CC:%.*]] = icmp slt i32 [[IND]], [[X]]
+; CHECK-NEXT: [[CC_INV:%.*]] = xor i1 [[CC]], true
+; CHECK-NEXT: br i1 [[CC]], label %[[IF:.*]], label %[[FLOW:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[V_PTR:%.*]] = getelementptr float, ptr addrspace(1) [[P]], i32 [[IND]]
+; CHECK-NEXT: [[V_LOAD:%.*]] = load float, ptr addrspace(1) [[V_PTR]], align 4
+; CHECK-NEXT: [[V_IF:%.*]] = fadd float [[V_LOAD]], 1.000000e+00
+; CHECK-NEXT: [[CC2:%.*]] = icmp slt i32 [[IND]], [[Y]]
+; CHECK-NEXT: br label %[[FLOW]]
+; CHECK: [[FLOW]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[V_IF]], %[[IF]] ], [ undef, %[[HEADER]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[V_IF]], %[[IF]] ], [ [[V_COPY]], %[[HEADER]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi float [ [[V_IF]], %[[IF]] ], [ [[V_1]], %[[HEADER]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = phi i1 [ [[CC2]], %[[IF]] ], [ [[CC_INV]], %[[HEADER]] ]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[LATCH:.*]], label %[[FLOW1]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IND_INC:%.*]] = add i32 [[IND]], 1
+; CHECK-NEXT: [[CC3:%.*]] = icmp slt i32 [[IND]], [[Z]]
+; CHECK-NEXT: br label %[[FLOW1]]
+; CHECK: [[FLOW1]]:
+; CHECK-NEXT: [[TMP4]] = phi i32 [ [[IND_INC]], %[[LATCH]] ], [ undef, %[[FLOW]] ]
+; CHECK-NEXT: [[TMP5]] = phi float [ [[TMP1]], %[[LATCH]] ], [ undef, %[[FLOW]] ]
+; CHECK-NEXT: [[TMP6]] = phi float [ [[TMP2]], %[[LATCH]] ], [ undef, %[[FLOW]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi float [ [[TMP1]], %[[LATCH]] ], [ [[TMP0]], %[[FLOW]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = phi float [ [[TMP2]], %[[LATCH]] ], [ [[TMP0]], %[[FLOW]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i1 [ [[CC3]], %[[LATCH]] ], [ true, %[[FLOW]] ]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[END:.*]], label %[[HEADER]]
+; CHECK: [[END]]:
+; CHECK-NEXT: [[PACKED0:%.*]] = insertelement <2 x float> poison, float [[TMP8]], i32 0
+; CHECK-NEXT: [[PACKED1:%.*]] = insertelement <2 x float> [[PACKED0]], float [[TMP7]], i32 1
+; CHECK-NEXT: ret <2 x float> [[PACKED1]]
+;
+entry:
+ br label %header
+
+header:
+ %v.1 = phi float [ %v, %entry ], [ %v.2, %latch ]
+ %v.copy = phi float [ 0.0, %entry ], [ %v.copy.2, %latch ]
+ %ind = phi i32 [ 0, %entry], [ %ind.inc, %latch ]
+ %cc = icmp slt i32 %ind, %x
+ br i1 %cc, label %if, label %latch
+
+if:
+ %v.ptr = getelementptr float, ptr addrspace(1) %p, i32 %ind
+ %v.load = load float, ptr addrspace(1) %v.ptr
+ %v.if = fadd float %v.load, 1.0
+ %cc2 = icmp slt i32 %ind, %y
+ br i1 %cc2, label %latch, label %end
+
+latch:
+ %v.2 = phi float [ %v.1, %header ], [ %v.if, %if ]
+ %v.copy.2 = phi float [ %v.copy, %header ], [ %v.if, %if ]
+ %ind.inc = add i32 %ind, 1
+ %cc3 = icmp slt i32 %ind, %z
+ br i1 %cc3, label %end, label %header
+
+end:
+ %r = phi float [ %v.2, %latch ], [ %v.if, %if ]
+ %r2 = phi float [ %v.copy.2, %latch ], [ %v.if, %if ]
+ %packed0 = insertelement < 2 x float > poison, float %r, i32 0
+ %packed1 = insertelement < 2 x float > %packed0, float %r2, i32 1
+ ret < 2 x float> %packed1
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc-invalid.yaml b/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc-invalid.yaml
new file mode 100644
index 0000000..18a43c5
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc-invalid.yaml
@@ -0,0 +1,1618 @@
+# Test ARM64X dynamic relocation validation.
+
+# RUN: yaml2obj %s --docnum=1 -o %t1.dll
+# RUN: not llvm-readobj %t1.dll 2>&1 | FileCheck --check-prefix=ERR-HEADER-RVA %s
+# ERR-HEADER-RVA: RVA 0x1002 for ARM64X reloc not found
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x5000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x2000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x5000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x6000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x1000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 0040 # ZERO offset 0xf00, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=2 -o %t2.dll
+# RUN: not llvm-readobj %t2.dll 2>&1 | FileCheck --check-prefix=ERR-SEC-INDEX %s
+# ERR-SEC-INDEX: section index out of bounds
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 5
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=3 -o %t3.dll
+# RUN: not llvm-readobj %t3.dll 2>&1 | FileCheck --check-prefix=ERR-DIR-INDEX %s
+# ERR-DIR-INDEX: Too large DynamicValueRelocTableOffset (4)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 11
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - Binary: 010000 # coff_dynamic_reloc_table.Size
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=4 -o %t4.dll
+# RUN: not llvm-readobj %t4.dll 2>&1 | FileCheck --check-prefix=ERR-DIR-SIZE %s
+# ERR-DIR-SIZE: Indvalid dynamic relocations directory size (2)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 13
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 2 # coff_dynamic_reloc_table.Size
+ - Binary: 00
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=5 -o %t5.dll
+# RUN: not llvm-readobj %t5.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-SIZE %s
+# ERR-RELOC-SIZE: Unexpected end of dynamic relocations data
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 11 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - Binary: 000000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=6 -o %t6.dll
+# RUN: not llvm-readobj %t6.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-SIZE2 %s
+# ERR-RELOC-SIZE2: Too large dynamic relocation size (4)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 15 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 4 # coff_dynamic_relocation64.BaseRelocSize
+ - Binary: 000000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=7 -o %t7.dll
+# RUN: not llvm-readobj %t7.dll 2>&1 | FileCheck --check-prefix=ERR-BLOCK-SIZE %s
+# ERR-BLOCK-SIZE: ARM64X relocations block size (7) is too small
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 21 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 9 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 7 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 00
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=8 -o %t8.dll
+# RUN: not llvm-readobj %t8.dll 2>&1 | FileCheck --check-prefix=ERR-BLOCK-SIZE2 %s
+# ERR-BLOCK-SIZE2: Unaligned ARM64X relocations block size (10)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 22 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 10 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 10 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 0000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=9 -o %t9.dll
+# RUN: not llvm-readobj %t9.dll 2>&1 | FileCheck --check-prefix=ERR-BLOCK-SIZE3 %s
+# ERR-BLOCK-SIZE3: ARM64X relocations block size (12) is too large
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 23 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 11 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 000000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=10 -o %t10.dll
+# RUN: not llvm-readobj %t10.dll 2>&1 | FileCheck --check-prefix=ERR-TERMINATOR %s
+# ERR-TERMINATOR: Unexpected ARM64X relocations terminator
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 00000000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=11 -o %t11.dll
+# RUN: not llvm-readobj %t11.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-TYPE %s
+# ERR-RELOC-TYPE: Invalid relocation type
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 00300000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=12 -o %t12.dll
+# RUN: not llvm-readobj %t12.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-VALUE %s
+# ERR-RELOC-VALUE: Invalid ARM64X relocation value size (0)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 00100000
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=13 -o %t13.dll
+# RUN: not llvm-readobj %t13.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-RVA %s
+# ERR-RELOC-RVA: RVA 0x6062 for ARM64X reloc not found
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x6000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 6040 # ZERO offset 0x60, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=14 -o %t14.dll
+# RUN: not llvm-readobj %t14.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-RVA-SIZE %s
+# ERR-RELOC-RVA-SIZE: RVA 0x6062 for ARM64X reloc not found
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x6000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 6040 # ZERO offset 0x60, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=15 -o %t15.dll
+# RUN: not llvm-readobj %t15.dll 2>&1 | FileCheck --check-prefix=ERR-PAGE-RVA %s
+# ERR-PAGE-RVA: Unaligned ARM64X relocations page RVA (24577)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x6001 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 0040 # ZERO offset 0, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=16 -o %t16.dll
+# RUN: not llvm-readobj %t16.dll 2>&1 | FileCheck --check-prefix=ERR-V2-HEADER-SIZE %s
+# ERR-V2-HEADER-SIZE: Invalid dynamic relocation header size (23)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 2 # coff_dynamic_reloc_table.Version
+ - UInt32: 36 # coff_dynamic_reloc_table.Size
+ - UInt32: 23 # coff_dynamic_relocation64_v2.HeaderSize
+ - UInt32: 12 # coff_dynamic_relocation64_v2.FixupInfoSize
+ - UInt32: 6 # coff_dynamic_relocation64_v2.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64_v2.Symbol(high)
+ - UInt32: 0 # coff_dynamic_relocation64_v2.SymbolGroup
+ - UInt32: 0 # coff_dynamic_relocation64_v2.Flags
+ - UInt32: 0x6000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 0040 # ZERO offset 0, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=17 -o %t17.dll
+# RUN: not llvm-readobj %t17.dll 2>&1 | FileCheck --check-prefix=ERR-RELOC-SIZE %s
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 2 # coff_dynamic_reloc_table.Version
+ - UInt32: 20 # coff_dynamic_reloc_table.Size
+ - UInt32: 24 # coff_dynamic_relocation64_v2.HeaderSize
+ - UInt32: 12 # coff_dynamic_relocation64_v2.FixupInfoSize
+ - UInt32: 6 # coff_dynamic_relocation64_v2.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64_v2.Symbol(high)
+ - UInt32: 0 # coff_dynamic_relocation64_v2.SymbolGroup
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=18 -o %t18.dll
+# RUN: not llvm-readobj %t18.dll 2>&1 | FileCheck --check-prefix=ERR-RVA-ALIGN %s
+# ERR-RVA-ALIGN: Unaligned ARM64X relocation RVA (24577)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x6000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+      - Binary:  0140             # ZERO offset 1 (unaligned RVA), size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=19 -o %t19.dll
+# RUN: not llvm-readobj %t19.dll 2>&1 | FileCheck --check-prefix=ERR-INVALID-VERSION %s
+# ERR-INVALID-VERSION: Unsupported dynamic relocations table version (3)
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 3 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0x6000 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+      - Binary:  0140             # ZERO offset 1, size 2
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=20 -o %t20.dll
+# RUN: not llvm-readobj %t20.dll 2>&1 | FileCheck --check-prefix=ERR-END %s
+# ERR-END: Unexpected end of ARM64X relocations
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x5000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x2000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x5000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x6000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 24 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 12 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 2490 # VALUE offset 0x24, size 4
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=21 -o %t21.dll
+# RUN: not llvm-readobj %t21.dll 2>&1 | FileCheck --check-prefix=ERR-TERMINATOR %s
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 4
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0 # CodeRangesToEntryPointsCount
+ - UInt32: 0 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 28 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 16 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 16 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 0040 # ZERO offset 0, size 2
+ - Binary: 00000000
+symbols: []
+...
diff --git a/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc.yaml b/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc.yaml
new file mode 100644
index 0000000..df2d83d
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/COFF/arm64x-reloc.yaml
@@ -0,0 +1,322 @@
+# Test reading ARM64X dynamic relocations.
+
+# RUN: yaml2obj %s -o %t.dll
+# RUN: llvm-readobj --coff-load-config %t.dll | FileCheck --check-prefixes=CHECK,V1 %s
+
+# CHECK: CHPEMetadataPointer: 0x180005000
+# CHECK: CHPEMetadata [
+# CHECK-NEXT: Version: 0x1
+# CHECK-NEXT: CodeMap [
+# CHECK-NEXT: 0x1000 - 0x1030 ARM64EC
+# CHECK-NEXT: 0x2000 - 0x2040 ARM64
+# CHECK-NEXT: 0x3000 - 0x3050 X64
+# CHECK-NEXT: ]
+# CHECK-NEXT: CodeRangesToEntryPoints [
+# CHECK-NEXT: 0x1000 - 0x1020 -> 0x1000
+# CHECK-NEXT: 0x1020 - 0x1040 -> 0x2000
+# CHECK-NEXT: ]
+# CHECK-NEXT: RedirectionMetadata [
+# CHECK-NEXT: 0x1000 -> 0x2000
+# CHECK-NEXT: 0x1020 -> 0x2030
+# CHECK-NEXT: ]
+
+# CHECK: DynamicRelocations [
+# V1: Version: 0x1
+# V2: Version: 0x2
+# CHECK: Arm64X [
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x84
+# CHECK-NEXT: Type: VALUE
+# CHECK-NEXT: Size: 0x2
+# CHECK-NEXT: Value: 0x8664
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6020
+# CHECK-NEXT: Type: VALUE
+# CHECK-NEXT: Size: 0x2
+# CHECK-NEXT: Value: 0x3412
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6024
+# CHECK-NEXT: Type: VALUE
+# CHECK-NEXT: Size: 0x4
+# CHECK-NEXT: Value: 0x89674523
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6028
+# CHECK-NEXT: Type: VALUE
+# CHECK-NEXT: Size: 0x8
+# CHECK-NEXT: Value: 0x8877665544332211
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6000
+# CHECK-NEXT: Type: ZERO
+# CHECK-NEXT: Size: 0x2
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6004
+# CHECK-NEXT: Type: ZERO
+# CHECK-NEXT: Size: 0x4
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6010
+# CHECK-NEXT: Type: ZERO
+# CHECK-NEXT: Size: 0x8
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6040
+# CHECK-NEXT: Type: DELTA
+# CHECK-NEXT: Value: 68740
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6044
+# CHECK-NEXT: Type: DELTA
+# CHECK-NEXT: Value: 137480
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x6048
+# CHECK-NEXT: Type: DELTA
+# CHECK-NEXT: Value: -16
+# CHECK-NEXT: ]
+# CHECK-NEXT: Entry [
+# CHECK-NEXT: RVA: 0x604C
+# CHECK-NEXT: Type: DELTA
+# CHECK-NEXT: Value: -32
+# CHECK-NEXT: ]
+# CHECK-NEXT: ]
+# CHECK-NEXT: ]
+
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 5
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 2 # CodeRangesToEntryPointsCount
+ - UInt32: 2 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - UInt32: 0x1000 # CodeRangesToEntryPoints[0]
+ - UInt32: 0x1020
+ - UInt32: 0x1000
+ - UInt32: 0x1020 # CodeRangesToEntryPoints[1]
+ - UInt32: 0x1040
+ - UInt32: 0x2000
+ - UInt32: 0x1000 # RedirectionMetadata[0]
+ - UInt32: 0x2000
+ - UInt32: 0x1020 # RedirectionMetadata[1]
+ - UInt32: 0x2030
+ - Name: .test
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x6000
+ VirtualSize: 0x60
+ StructuredData:
+ - Binary: 111122223333444455556666777788889999aaaabbbbccccddddeeeeffff0000
+ - Binary: 0000000000000000000000000000000000000000000000000000000000000000
+ - Binary: 1010101020202020303030304040404050505050606060607070707080808080
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 1 # coff_dynamic_reloc_table.Version
+ - UInt32: 76 # coff_dynamic_reloc_table.Size
+ - UInt32: 6 # coff_dynamic_relocation64.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64.Symbol(high)
+ - UInt32: 64 # coff_dynamic_relocation64.BaseRelocSize
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 8450 # VALUE offset 0x84 (PE header Machine), size 2
+ - Binary: 6486 # IMAGE_FILE_MACHINE_AMD64
+ - UInt32: 0x6000 # coff_base_reloc_block_header[1].PageRVA
+ - UInt32: 52 # coff_base_reloc_block_header[1].BlockSize
+ - Binary: 2050 # VALUE offset 0x20, size 2
+ - Binary: 1234 # test value
+ - Binary: 2490 # VALUE offset 0x24, size 4
+ - Binary: 23456789 # test value
+      - Binary:  28d0             # VALUE offset 0x28, size 8
+ - Binary: 11223344 # test value
+ - Binary: 55667788 # test value
+ - Binary: 0040 # ZERO offset 0, size 2
+ - Binary: 0480 # ZERO offset 4, size 4
+ - Binary: 10c0 # ZERO offset 16, size 8
+ - Binary: 4020 # DELTA offset 0x40, mul 4
+ - Binary: 2143
+ - Binary: 44a0 # DELTA offset 0x44, mul 8
+ - Binary: 2143
+ - Binary: 4860 # DELTA offset 0x48, mul 4
+ - Binary: 0400
+ - Binary: 4ce0 # DELTA offset 0x4c, mul 8
+ - Binary: 0400
+ - Binary: 0000 # terminator
+symbols: []
+...
+
+# RUN: yaml2obj %s --docnum=2 -o %t2.dll
+# RUN: llvm-readobj --coff-load-config %t2.dll | FileCheck --check-prefixes=CHECK,V2 %s
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_ARM64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 328
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x180005000
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 5
+ - Name: .data
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE ]
+ VirtualAddress: 0x5000
+ VirtualSize: 144
+ StructuredData:
+ - UInt32: 1 # Version
+ - UInt32: 0x5050 # CodeMap
+ - UInt32: 3 # CodeMapCount
+ - UInt32: 0x5068 # CodeRangesToEntryPoints
+ - UInt32: 0x5080 # RedirectionMetadata
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 2 # CodeRangesToEntryPointsCount
+ - UInt32: 2 # RedirectionMetadataCount
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0
+ - UInt32: 0x1001 # CodeMap[0]
+ - UInt32: 0x30
+ - UInt32: 0x2000 # CodeMap[1]
+ - UInt32: 0x40
+ - UInt32: 0x3002 # CodeMap[2]
+ - UInt32: 0x50
+ - UInt32: 0x1000 # CodeRangesToEntryPoints[0]
+ - UInt32: 0x1020
+ - UInt32: 0x1000
+ - UInt32: 0x1020 # CodeRangesToEntryPoints[1]
+ - UInt32: 0x1040
+ - UInt32: 0x2000
+ - UInt32: 0x1000 # RedirectionMetadata[0]
+ - UInt32: 0x2000
+ - UInt32: 0x1020 # RedirectionMetadata[1]
+ - UInt32: 0x2030
+ - Name: .test
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x6000
+ VirtualSize: 0x60
+ StructuredData:
+ - Binary: 111122223333444455556666777788889999aaaabbbbccccddddeeeeffff0000
+ - Binary: 0000000000000000000000000000000000000000000000000000000000000000
+ - Binary: 1010101020202020303030304040404050505050606060607070707080808080
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_DISCARDABLE ]
+ VirtualAddress: 0x7000
+ VirtualSize: 328
+ StructuredData:
+ - UInt32: 0 # pad
+ - UInt32: 2 # coff_dynamic_reloc_table.Version
+ - UInt32: 88 # coff_dynamic_reloc_table.Size
+ - UInt32: 24 # coff_dynamic_relocation64_v2.HeaderSize
+ - UInt32: 64 # coff_dynamic_relocation64_v2.FixupInfoSize
+ - UInt32: 6 # coff_dynamic_relocation64_v2.Symbol(low) = IMAGE_DYNAMIC_RELOCATION_ARM64X
+ - UInt32: 0 # coff_dynamic_relocation64_v2.Symbol(high)
+ - UInt32: 0 # coff_dynamic_relocation64_v2.SymbolGroup
+ - UInt32: 0 # coff_dynamic_relocation64_v2.Flags
+ - UInt32: 0 # coff_base_reloc_block_header[0].PageRVA
+ - UInt32: 12 # coff_base_reloc_block_header[0].BlockSize
+ - Binary: 8450 # VALUE offset 0x84 (PE header Machine), size 2
+ - Binary: 6486 # IMAGE_FILE_MACHINE_AMD64
+ - UInt32: 0x6000 # coff_base_reloc_block_header[1].PageRVA
+ - UInt32: 52 # coff_base_reloc_block_header[1].BlockSize
+ - Binary: 2050 # VALUE offset 0x20, size 2
+ - Binary: 1234 # test value
+ - Binary: 2490 # VALUE offset 0x24, size 4
+ - Binary: 23456789 # test value
+      - Binary:  28d0             # VALUE offset 0x28, size 8
+ - Binary: 11223344 # test value
+ - Binary: 55667788 # test value
+ - Binary: 0040 # ZERO offset 0, size 2
+ - Binary: 0480 # ZERO offset 4, size 4
+ - Binary: 10c0 # ZERO offset 16, size 8
+ - Binary: 4020 # DELTA offset 0x40, mul 4
+ - Binary: 2143
+ - Binary: 44a0 # DELTA offset 0x44, mul 8
+ - Binary: 2143
+ - Binary: 4860 # DELTA offset 0x48, mul 4
+ - Binary: 0400
+ - Binary: 4ce0 # DELTA offset 0x4c, mul 8
+ - Binary: 0400
+ - Binary: 0000 # terminator
+symbols: []
+...
diff --git a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
index 91cc8b0..a71f8a2 100644
--- a/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
+++ b/llvm/test/tools/llvm-readobj/ELF/AArch64/aarch64-feature-pauth.s
@@ -89,7 +89,7 @@ end:
# RUN: llvm-readobj --notes gnu-1-0.o | \
# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x1 (baremetal)" -DVERSION=0x0 %s
-#--- gnu-0x10000002-85.s
+#--- gnu-0x10000002-0.s
.section ".note.gnu.property", "a"
.long 4 // Name length is always 4 ("GNU")
.long end - begin // Data length
@@ -101,19 +101,91 @@ begin:
.long 0xc0000001 // Type: GNU_PROPERTY_AARCH64_FEATURE_PAUTH
.long 16 // Data size
.quad 0x10000002 // PAuth ABI platform
- .quad 85 // PAuth ABI version
+ .quad 0 // PAuth ABI version
+ .p2align 3 // Align to 8 byte for 64 bit
+end:
+
+# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-0.s -o gnu-0x10000002-0.o
+# RUN: llvm-readelf --notes gnu-0x10000002-0.o | \
+# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0x0 (!PointerAuthIntrinsics, !PointerAuthCalls, !PointerAuthReturns, !PointerAuthAuthTraps, !PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, !PointerAuthInitFini, !PointerAuthInitFiniAddressDiscrimination, !PointerAuthELFGOT)" %s
+# RUN: llvm-readobj --notes gnu-0x10000002-0.o | \
+# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0x0 (!PointerAuthIntrinsics, !PointerAuthCalls, !PointerAuthReturns, !PointerAuthAuthTraps, !PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, !PointerAuthInitFini, !PointerAuthInitFiniAddressDiscrimination, !PointerAuthELFGOT)" %s
+
+#--- gnu-0x10000002-341.s
+.section ".note.gnu.property", "a"
+ .long 4 // Name length is always 4 ("GNU")
+ .long end - begin // Data length
+ .long 5 // Type: NT_GNU_PROPERTY_TYPE_0
+ .asciz "GNU" // Name
+ .p2align 3
+begin:
+ # PAuth ABI property note
+ .long 0xc0000001 // Type: GNU_PROPERTY_AARCH64_FEATURE_PAUTH
+ .long 16 // Data size
+ .quad 0x10000002 // PAuth ABI platform
+ .quad 341 // PAuth ABI version
+ .p2align 3 // Align to 8 byte for 64 bit
+end:
+
+# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-341.s -o gnu-0x10000002-341.o
+# RUN: llvm-readelf --notes gnu-0x10000002-341.o | \
+# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0x155 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini, !PointerAuthInitFiniAddressDiscrimination, PointerAuthELFGOT)" %s
+# RUN: llvm-readobj --notes gnu-0x10000002-341.o | \
+# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0x155 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini, !PointerAuthInitFiniAddressDiscrimination, PointerAuthELFGOT)" %s
+
+#--- gnu-0x10000002-170.s
+.section ".note.gnu.property", "a"
+ .long 4 // Name length is always 4 ("GNU")
+ .long end - begin // Data length
+ .long 5 // Type: NT_GNU_PROPERTY_TYPE_0
+ .asciz "GNU" // Name
+ .p2align 3
+begin:
+ # PAuth ABI property note
+ .long 0xc0000001 // Type: GNU_PROPERTY_AARCH64_FEATURE_PAUTH
+ .long 16 // Data size
+ .quad 0x10000002 // PAuth ABI platform
+ .quad 170 // PAuth ABI version
+ .p2align 3 // Align to 8 byte for 64 bit
+end:
+
+# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-170.s -o gnu-0x10000002-170.o
+# RUN: llvm-readelf --notes gnu-0x10000002-170.o | \
+# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0xaa (!PointerAuthIntrinsics, PointerAuthCalls, !PointerAuthReturns, PointerAuthAuthTraps, !PointerAuthVTPtrAddressDiscrimination, PointerAuthVTPtrTypeDiscrimination, !PointerAuthInitFini, PointerAuthInitFiniAddressDiscrimination, !PointerAuthELFGOT)" %s
+# RUN: llvm-readobj --notes gnu-0x10000002-170.o | \
+# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" \
+# RUN: -DVERSION="0xaa (!PointerAuthIntrinsics, PointerAuthCalls, !PointerAuthReturns, PointerAuthAuthTraps, !PointerAuthVTPtrAddressDiscrimination, PointerAuthVTPtrTypeDiscrimination, !PointerAuthInitFini, PointerAuthInitFiniAddressDiscrimination, !PointerAuthELFGOT)" %s
+
+#--- gnu-0x10000002-511.s
+.section ".note.gnu.property", "a"
+ .long 4 // Name length is always 4 ("GNU")
+ .long end - begin // Data length
+ .long 5 // Type: NT_GNU_PROPERTY_TYPE_0
+ .asciz "GNU" // Name
+ .p2align 3
+begin:
+ # PAuth ABI property note
+ .long 0xc0000001 // Type: GNU_PROPERTY_AARCH64_FEATURE_PAUTH
+ .long 16 // Data size
+ .quad 0x10000002 // PAuth ABI platform
+ .quad 511 // PAuth ABI version
.p2align 3 // Align to 8 byte for 64 bit
end:
-# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-85.s -o gnu-0x10000002-85.o
-# RUN: llvm-readelf --notes gnu-0x10000002-85.o | \
+# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-511.s -o gnu-0x10000002-511.o
+# RUN: llvm-readelf --notes gnu-0x10000002-511.o | \
# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" \
-# RUN: -DVERSION="0x55 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini)" %s
-# RUN: llvm-readobj --notes gnu-0x10000002-85.o | \
+# RUN: -DVERSION="0x1ff (PointerAuthIntrinsics, PointerAuthCalls, PointerAuthReturns, PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini, PointerAuthInitFiniAddressDiscrimination, PointerAuthELFGOT)" %s
+# RUN: llvm-readobj --notes gnu-0x10000002-511.o | \
# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" \
-# RUN: -DVERSION="0x55 (PointerAuthIntrinsics, !PointerAuthCalls, PointerAuthReturns, !PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, !PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini)" %s
+# RUN: -DVERSION="0x1ff (PointerAuthIntrinsics, PointerAuthCalls, PointerAuthReturns, PointerAuthAuthTraps, PointerAuthVTPtrAddressDiscrimination, PointerAuthVTPtrTypeDiscrimination, PointerAuthInitFini, PointerAuthInitFiniAddressDiscrimination, PointerAuthELFGOT)" %s
-#--- gnu-0x10000002-128.s
+#--- gnu-0x10000002-512.s
.section ".note.gnu.property", "a"
.long 4 // Name length is always 4 ("GNU")
.long end - begin // Data length
@@ -125,15 +197,15 @@ begin:
.long 0xc0000001 // Type: GNU_PROPERTY_AARCH64_FEATURE_PAUTH
.long 16 // Data size
.quad 0x10000002 // PAuth ABI platform
- .quad 128 // PAuth ABI version
+ .quad 512 // PAuth ABI version
.p2align 3 // Align to 8 byte for 64 bit
end:
-# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-128.s -o gnu-0x10000002-128.o
-# RUN: llvm-readelf --notes gnu-0x10000002-128.o | \
-# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" -DVERSION="0x80 (unknown)" %s
-# RUN: llvm-readobj --notes gnu-0x10000002-128.o | \
-# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" -DVERSION="0x80 (unknown)" %s
+# RUN: llvm-mc -filetype=obj -triple aarch64-linux-gnu gnu-0x10000002-512.s -o gnu-0x10000002-512.o
+# RUN: llvm-readelf --notes gnu-0x10000002-512.o | \
+# RUN: FileCheck --check-prefix=ELF -DPLATFORM="0x10000002 (llvm_linux)" -DVERSION="0x200 (unknown)" %s
+# RUN: llvm-readobj --notes gnu-0x10000002-512.o | \
+# RUN: FileCheck --check-prefix=OBJ -DPLATFORM="0x10000002 (llvm_linux)" -DVERSION="0x200 (unknown)" %s
#--- gnu-short.s
.section ".note.gnu.property", "a"
diff --git a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
index 81f2c9c..9b37b49 100644
--- a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
+++ b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
@@ -32,14 +32,15 @@
# RUN: FileCheck %s --check-prefix=AVAIL --input-file %t3.txt
# RUN: FileCheck %s --check-prefix=UNAVAIL --input-file %t3.txt
#
-# CHECK: << Total TLI yes SDK no: 8
+# CHECK: << Total TLI yes SDK no: 12
# CHECK: >> Total TLI no SDK yes: 0
# CHECK: == Total TLI yes SDK yes: 248
#
# WRONG_DETAIL: << TLI yes SDK no : '_ZdaPv' aka operator delete[](void*)
# WRONG_DETAIL: >> TLI no SDK yes: '_ZdaPvj' aka operator delete[](void*, unsigned int)
-# WRONG_DETAIL-COUNT-8: << TLI yes SDK no : {{.*}}__hot_cold_t
-# WRONG_SUMMARY: << Total TLI yes SDK no: 9{{$}}
+# WRONG_DETAIL-COUNT-8: << TLI yes SDK no : '_Zn{{.*}}__hot_cold_t
+# WRONG_DETAIL-COUNT-4: << TLI yes SDK no : '__size_returning_new{{.*}}
+# WRONG_SUMMARY: << Total TLI yes SDK no: 13{{$}}
# WRONG_SUMMARY: >> Total TLI no SDK yes: 1{{$}}
# WRONG_SUMMARY: == Total TLI yes SDK yes: 247
#
@@ -47,8 +48,8 @@
## the exact count first; the two directives should add up to that.
## Yes, this means additions to TLI will fail this test, but the argument
## to -COUNT can't be an expression.
-# AVAIL: TLI knows 489 symbols, 256 available
-# AVAIL-COUNT-256: {{^}} available
+# AVAIL: TLI knows 493 symbols, 260 available
+# AVAIL-COUNT-260: {{^}} available
# AVAIL-NOT: {{^}} available
# UNAVAIL-COUNT-233: not available
# UNAVAIL-NOT: not available
diff --git a/llvm/test/tools/yaml2obj/COFF/load-config.yaml b/llvm/test/tools/yaml2obj/COFF/load-config.yaml
index 8d177bd..dfdb7e4 100644
--- a/llvm/test/tools/yaml2obj/COFF/load-config.yaml
+++ b/llvm/test/tools/yaml2obj/COFF/load-config.yaml
@@ -40,8 +40,8 @@
# ALL-NEXT: CHPEMetadataPointer: 0x0
# ALL-NEXT: GuardRFFailureRoutine: 0x4
# ALL-NEXT: GuardRFFailureRoutineFunctionPointer: 0x5
-# ALL-NEXT: DynamicValueRelocTableOffset: 0x6
-# ALL-NEXT: DynamicValueRelocTableSection: 7
+# ALL-NEXT: DynamicValueRelocTableOffset: 0x4
+# ALL-NEXT: DynamicValueRelocTableSection: 2
# ALL-NEXT: GuardRFVerifyStackPointerFunctionPointer: 0x8
# ALL-NEXT: HotPatchTableOffset: 0x9
# ALL-NEXT: EnclaveConfigurationPointer: 0x1
@@ -106,8 +106,8 @@ sections:
CHPEMetadataPointer: 0
GuardRFFailureRoutine: 4
GuardRFFailureRoutineFunctionPointer: 5
- DynamicValueRelocTableOffset: 6
- DynamicValueRelocTableSection: 7
+ DynamicValueRelocTableOffset: 4
+ DynamicValueRelocTableSection: 2
GuardRFVerifyStackPointerFunctionPointer: 8
HotPatchTableOffset: 9
EnclaveConfigurationPointer: 1
@@ -118,6 +118,14 @@ sections:
GuardXFGDispatchFunctionPointer: 6
GuardXFGTableDispatchFunctionPointer: 7
CastGuardOsDeterminedFailureMode: 8
+ - Name: .reloc
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x2000
+ VirtualSize: 12
+ StructuredData:
+ - UInt32: 1 # pad
+ - UInt32: 2 # Version
+ - UInt32: 0 # Size
symbols: []
...
diff --git a/llvm/tools/bugpoint/CMakeLists.txt b/llvm/tools/bugpoint/CMakeLists.txt
index b0e7191..f846aed 100644
--- a/llvm/tools/bugpoint/CMakeLists.txt
+++ b/llvm/tools/bugpoint/CMakeLists.txt
@@ -37,5 +37,5 @@ add_llvm_tool(bugpoint
DEPENDS
intrinsics_gen
SUPPORT_PLUGINS
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
-export_executable_symbols_for_plugins(bugpoint)
diff --git a/llvm/tools/llc/CMakeLists.txt b/llvm/tools/llc/CMakeLists.txt
index 01825c6..c540794 100644
--- a/llvm/tools/llc/CMakeLists.txt
+++ b/llvm/tools/llc/CMakeLists.txt
@@ -30,6 +30,5 @@ add_llvm_tool(llc
DEPENDS
intrinsics_gen
SUPPORT_PLUGINS
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
-
-export_executable_symbols_for_plugins(llc)
diff --git a/llvm/tools/llc/llc.cpp b/llvm/tools/llc/llc.cpp
index e7bf192..d3f7c2b 100644
--- a/llvm/tools/llc/llc.cpp
+++ b/llvm/tools/llc/llc.cpp
@@ -342,7 +342,6 @@ int main(int argc, char **argv) {
initializeVectorization(*Registry);
initializeScalarizeMaskedMemIntrinLegacyPassPass(*Registry);
initializeExpandReductionsPass(*Registry);
- initializeExpandVectorPredicationPass(*Registry);
initializeHardwareLoopsLegacyPass(*Registry);
initializeTransformUtils(*Registry);
initializeReplaceWithVeclibLegacyPass(*Registry);
diff --git a/llvm/tools/lli/CMakeLists.txt b/llvm/tools/lli/CMakeLists.txt
index e3fca22..3106f25 100644
--- a/llvm/tools/lli/CMakeLists.txt
+++ b/llvm/tools/lli/CMakeLists.txt
@@ -56,6 +56,6 @@ add_llvm_tool(lli
DEPENDS
intrinsics_gen
- )
-export_executable_symbols(lli)
+ EXPORT_SYMBOLS
+ )
diff --git a/llvm/tools/lli/ChildTarget/CMakeLists.txt b/llvm/tools/lli/ChildTarget/CMakeLists.txt
index 5a81faa..b3c1dd2 100644
--- a/llvm/tools/lli/ChildTarget/CMakeLists.txt
+++ b/llvm/tools/lli/ChildTarget/CMakeLists.txt
@@ -10,6 +10,6 @@ add_llvm_utility(lli-child-target
DEPENDS
intrinsics_gen
-)
-export_executable_symbols(lli-child-target)
+ EXPORT_SYMBOLS
+)
diff --git a/llvm/tools/llvm-jitlink/CMakeLists.txt b/llvm/tools/llvm-jitlink/CMakeLists.txt
index 1487e78..e337fe5 100644
--- a/llvm/tools/llvm-jitlink/CMakeLists.txt
+++ b/llvm/tools/llvm-jitlink/CMakeLists.txt
@@ -26,6 +26,8 @@ add_llvm_tool(llvm-jitlink
llvm-jitlink-elf.cpp
llvm-jitlink-macho.cpp
llvm-jitlink-statistics.cpp
+
+ EXPORT_SYMBOLS
)
if(${CMAKE_SYSTEM_NAME} MATCHES "Haiku")
@@ -35,5 +37,3 @@ endif()
if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
target_link_libraries(llvm-jitlink PRIVATE socket)
endif()
-
-export_executable_symbols(llvm-jitlink)
diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink-executor/CMakeLists.txt b/llvm/tools/llvm-jitlink/llvm-jitlink-executor/CMakeLists.txt
index f6d882d..f2daa29 100644
--- a/llvm/tools/llvm-jitlink/llvm-jitlink-executor/CMakeLists.txt
+++ b/llvm/tools/llvm-jitlink/llvm-jitlink-executor/CMakeLists.txt
@@ -9,6 +9,6 @@ add_llvm_utility(llvm-jitlink-executor
DEPENDS
intrinsics_gen
-)
-export_executable_symbols(llvm-jitlink-executor)
+ EXPORT_SYMBOLS
+)
diff --git a/llvm/tools/llvm-lto2/CMakeLists.txt b/llvm/tools/llvm-lto2/CMakeLists.txt
index 3b4644d..335392f 100644
--- a/llvm/tools/llvm-lto2/CMakeLists.txt
+++ b/llvm/tools/llvm-lto2/CMakeLists.txt
@@ -21,5 +21,6 @@ add_llvm_tool(llvm-lto2
DEPENDS
intrinsics_gen
+
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
-export_executable_symbols_for_plugins(llvm-lto2)
diff --git a/llvm/tools/llvm-readobj/COFFDumper.cpp b/llvm/tools/llvm-readobj/COFFDumper.cpp
index b104774..65d67d2 100644
--- a/llvm/tools/llvm-readobj/COFFDumper.cpp
+++ b/llvm/tools/llvm-readobj/COFFDumper.cpp
@@ -972,6 +972,43 @@ void COFFDumper::printCOFFLoadConfig() {
printRVATable(Tables.GuardEHContTableVA, Tables.GuardEHContTableCount,
4 + Stride, PrintExtra);
}
+
+ if (const coff_dynamic_reloc_table *DynRelocTable =
+ Obj->getDynamicRelocTable()) {
+ ListScope LS(W, "DynamicRelocations");
+ W.printHex("Version", DynRelocTable->Version);
+ for (auto reloc : Obj->dynamic_relocs()) {
+ switch (reloc.getType()) {
+ case COFF::IMAGE_DYNAMIC_RELOCATION_ARM64X: {
+ ListScope TLS(W, "Arm64X");
+ for (auto Arm64XReloc : reloc.arm64x_relocs()) {
+ ListScope ELS(W, "Entry");
+ W.printHex("RVA", Arm64XReloc.getRVA());
+ switch (Arm64XReloc.getType()) {
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_ZEROFILL:
+ W.printString("Type", "ZEROFILL");
+ W.printHex("Size", Arm64XReloc.getSize());
+ break;
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE:
+ W.printString("Type", "VALUE");
+ W.printHex("Size", Arm64XReloc.getSize());
+ W.printHex("Value", Arm64XReloc.getValue());
+ break;
+ case COFF::IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA:
+ W.printString("Type", "DELTA");
+ W.printNumber("Value",
+ static_cast<int32_t>(Arm64XReloc.getValue()));
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ W.printHex("Type", reloc.getType());
+ break;
+ }
+ }
+ }
}
template <typename T>
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index f0a22f1..1457ae8 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -5284,8 +5284,11 @@ static bool printAArch64PAuthABICoreInfo(raw_ostream &OS, uint32_t DataSize,
Flags[AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR] =
"VTPtrTypeDiscrimination";
Flags[AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI] = "InitFini";
+ Flags[AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINIADDRDISC] =
+ "InitFiniAddressDiscrimination";
+ Flags[AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT] = "ELFGOT";
- static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI ==
+ static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_GOT ==
AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST,
"Update when new enum items are defined");
diff --git a/llvm/tools/llvm-readobj/ObjDumper.cpp b/llvm/tools/llvm-readobj/ObjDumper.cpp
index 0980d2a..20e99d9 100644
--- a/llvm/tools/llvm-readobj/ObjDumper.cpp
+++ b/llvm/tools/llvm-readobj/ObjDumper.cpp
@@ -82,8 +82,8 @@ void ObjDumper::printAsStringList(StringRef StringContent,
continue;
}
W.startLine() << format("[%6tx] ", CurrentWord - StrContent);
- printAsPrintable(W.startLine(), CurrentWord, WordSize);
- W.startLine() << '\n';
+ printAsPrintable(W.getOStream(), CurrentWord, WordSize);
+ W.getOStream() << '\n';
CurrentWord += WordSize + 1;
}
}
@@ -91,7 +91,7 @@ void ObjDumper::printAsStringList(StringRef StringContent,
void ObjDumper::printFileSummary(StringRef FileStr, object::ObjectFile &Obj,
ArrayRef<std::string> InputFilenames,
const object::Archive *A) {
- W.startLine() << "\n";
+ W.getOStream() << "\n";
W.printString("File", FileStr);
W.printString("Format", Obj.getFileFormatName());
W.printString("Arch", Triple::getArchTypeName(Obj.getArch()));
@@ -163,7 +163,8 @@ void ObjDumper::printSectionsAsString(const object::ObjectFile &Obj,
for (object::SectionRef Section :
getSectionRefsByNameOrIndex(Obj, Sections)) {
StringRef SectionName = unwrapOrError(Obj.getFileName(), Section.getName());
- W.startLine() << "\nString dump of section '" << SectionName << "':\n";
+ W.getOStream() << '\n';
+ W.startLine() << "String dump of section '" << SectionName << "':\n";
StringRef SectionContent =
unwrapOrError(Obj.getFileName(), Section.getContents());
@@ -180,7 +181,8 @@ void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj,
for (object::SectionRef Section :
getSectionRefsByNameOrIndex(Obj, Sections)) {
StringRef SectionName = unwrapOrError(Obj.getFileName(), Section.getName());
- W.startLine() << "\nHex dump of section '" << SectionName << "':\n";
+ W.getOStream() << '\n';
+ W.startLine() << "Hex dump of section '" << SectionName << "':\n";
StringRef SectionContent =
unwrapOrError(Obj.getFileName(), Section.getContents());
@@ -196,13 +198,13 @@ void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj,
W.startLine() << format_hex(Section.getAddress() + (SecPtr - SecContent),
10);
- W.startLine() << ' ';
+ W.getOStream() << ' ';
for (i = 0; TmpSecPtr < SecEnd && i < 4; ++i) {
for (k = 0; TmpSecPtr < SecEnd && k < 4; k++, TmpSecPtr++) {
uint8_t Val = *(reinterpret_cast<const uint8_t *>(TmpSecPtr));
- W.startLine() << format_hex_no_prefix(Val, 2);
+ W.getOStream() << format_hex_no_prefix(Val, 2);
}
- W.startLine() << ' ';
+ W.getOStream() << ' ';
}
// We need to print the correct amount of spaces to match the format.
@@ -211,17 +213,17 @@ void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj,
// Least, if we cut in a middle of a row, we add the remaining characters,
// which is (8 - (k * 2)).
if (i < 4)
- W.startLine() << format("%*c", (4 - i) * 8 + (4 - i), ' ');
+ W.getOStream() << format("%*c", (4 - i) * 8 + (4 - i), ' ');
if (k < 4)
- W.startLine() << format("%*c", 8 - k * 2, ' ');
+ W.getOStream() << format("%*c", 8 - k * 2, ' ');
TmpSecPtr = SecPtr;
for (i = 0; TmpSecPtr + i < SecEnd && i < 16; ++i)
- W.startLine() << (isPrint(TmpSecPtr[i])
- ? static_cast<char>(TmpSecPtr[i])
- : '.');
+ W.getOStream() << (isPrint(TmpSecPtr[i])
+ ? static_cast<char>(TmpSecPtr[i])
+ : '.');
- W.startLine() << '\n';
+ W.getOStream() << '\n';
}
}
}
diff --git a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
index 3d36d80..1510e9f 100644
--- a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
+++ b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
@@ -306,9 +306,10 @@ static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
DstMRI->setType(NewReg, RegTy);
// Copy register allocation hints.
- const auto &Hints = SrcMRI->getRegAllocationHints(Reg);
- for (Register PrefReg : Hints.second)
- DstMRI->addRegAllocationHint(NewReg, PrefReg);
+ const auto *Hints = SrcMRI->getRegAllocationHints(Reg);
+ if (Hints)
+ for (Register PrefReg : Hints->second)
+ DstMRI->addRegAllocationHint(NewReg, PrefReg);
}
const TargetSubtargetInfo &STI = DstMF->getSubtarget();
@@ -530,7 +531,8 @@ static uint64_t computeMIRComplexityScoreImpl(const MachineFunction &MF) {
const MachineRegisterInfo &MRI = MF.getRegInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
Register Reg = Register::index2VirtReg(I);
- Score += MRI.getRegAllocationHints(Reg).second.size();
+ if (const auto *Hints = MRI.getRegAllocationHints(Reg))
+ Score += Hints->second.size();
}
for (const MachineBasicBlock &MBB : MF) {
@@ -641,12 +643,27 @@ static uint64_t computeIRComplexityScoreImpl(const Function &F) {
++Score;
if (OverflowOp->hasNoSignedWrap())
++Score;
- } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
- if (GEP->isInBounds())
+ } else if (const auto *Trunc = dyn_cast<TruncInst>(&I)) {
+ if (Trunc->hasNoSignedWrap())
+ ++Score;
+ if (Trunc->hasNoUnsignedWrap())
++Score;
} else if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
if (ExactOp->isExact())
++Score;
+ } else if (const auto *NNI = dyn_cast<PossiblyNonNegInst>(&I)) {
+ if (NNI->hasNonNeg())
+ ++Score;
+ } else if (const auto *PDI = dyn_cast<PossiblyDisjointInst>(&I)) {
+ if (PDI->isDisjoint())
+ ++Score;
+ } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
+ if (GEP->isInBounds())
+ ++Score;
+ if (GEP->hasNoUnsignedSignedWrap())
+ ++Score;
+ if (GEP->hasNoUnsignedWrap())
+ ++Score;
} else if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
FastMathFlags FMF = FPOp->getFastMathFlags();
if (FMF.allowReassoc())
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp b/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
index ba345d3..ff9dde40 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
@@ -20,6 +20,7 @@
using namespace llvm;
static void reduceFlagsInModule(Oracle &O, ReducerWorkItem &WorkItem) {
+ // Keep this in sync with computeIRComplexityScoreImpl().
for (Function &F : WorkItem.getModule()) {
for (Instruction &I : instructions(F)) {
if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I)) {
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp b/llvm/tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp
index 2b97e65..3ec9555 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp
@@ -23,9 +23,9 @@ static void dropRegisterHintsFromFunction(Oracle &O, MachineFunction &MF) {
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
Register Reg = Register::index2VirtReg(I);
- const std::pair<unsigned, SmallVector<Register, 4>> &Hints =
+ const std::pair<unsigned, SmallVector<Register, 4>> *Hints =
MRI.getRegAllocationHints(Reg);
- if (Hints.second.empty())
+ if (!Hints || Hints->second.empty())
continue;
if (!O.shouldKeep())
diff --git a/llvm/tools/opt/CMakeLists.txt b/llvm/tools/opt/CMakeLists.txt
index 8d031b2..c235fcf 100644
--- a/llvm/tools/opt/CMakeLists.txt
+++ b/llvm/tools/opt/CMakeLists.txt
@@ -45,8 +45,7 @@ add_llvm_tool(opt
DEPENDS
intrinsics_gen
SUPPORT_PLUGINS
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
target_link_libraries(opt PRIVATE LLVMOptDriver)
-
-export_executable_symbols_for_plugins(opt)
diff --git a/llvm/tools/opt/optdriver.cpp b/llvm/tools/opt/optdriver.cpp
index b26aeb0..eb4821d5 100644
--- a/llvm/tools/opt/optdriver.cpp
+++ b/llvm/tools/opt/optdriver.cpp
@@ -443,7 +443,6 @@ extern "C" int optMain(
initializePostInlineEntryExitInstrumenterPass(Registry);
initializeUnreachableBlockElimLegacyPassPass(Registry);
initializeExpandReductionsPass(Registry);
- initializeExpandVectorPredicationPass(Registry);
initializeWasmEHPreparePass(Registry);
initializeWriteBitcodePassPass(Registry);
initializeReplaceWithVeclibLegacyPass(Registry);
diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt
index 3cba630..a1199ad 100644
--- a/llvm/unittests/Analysis/CMakeLists.txt
+++ b/llvm/unittests/Analysis/CMakeLists.txt
@@ -62,8 +62,14 @@ else()
LIST(APPEND LLVM_OPTIONAL_SOURCES ${MLGO_TESTS})
endif()
+# Export symbols from the plugins shared objects.
+if(NOT WIN32)
+ set(export_symbols EXPORT_SYMBOLS_FOR_PLUGINS)
+endif()
+
add_llvm_unittest_with_input_files(AnalysisTests
${ANALYSIS_TEST_SOURCES}
+ ${export_symbols}
)
add_dependencies(AnalysisTests intrinsics_gen)
@@ -76,10 +82,5 @@ if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-brtl")
endif()
-# Export symbols from the plugins shared objects.
-if(NOT WIN32)
- export_executable_symbols_for_plugins(AnalysisTests)
-endif()
-
add_subdirectory(InlineAdvisorPlugin)
add_subdirectory(InlineOrderPlugin)
diff --git a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
index d344ebe..d200956 100644
--- a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
+++ b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -472,6 +472,10 @@ TEST_F(TargetLibraryInfoTest, ValidProto) {
"declare i8* @_ZnwmSt11align_val_tRKSt9nothrow_t(i64, i64, %struct*)\n"
"declare i8* @_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t(i64, i64, "
"%struct*, i8)\n"
+ "declare %struct @__size_returning_new(i64)\n"
+ "declare %struct @__size_returning_new_hot_cold(i64, i8)\n"
+ "declare %struct @__size_returning_new_aligned(i64, i64)\n"
+ "declare %struct @__size_returning_new_aligned_hot_cold(i64, i64, i8)\n"
"declare void @\"??3@YAXPEAX@Z\"(i8*)\n"
"declare void @\"??3@YAXPEAXAEBUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
diff --git a/llvm/unittests/BinaryFormat/MachOTest.cpp b/llvm/unittests/BinaryFormat/MachOTest.cpp
index 391298f..78b20c2 100644
--- a/llvm/unittests/BinaryFormat/MachOTest.cpp
+++ b/llvm/unittests/BinaryFormat/MachOTest.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/bit.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/TargetParser/Triple.h"
#include "gtest/gtest.h"
@@ -13,7 +14,15 @@
using namespace llvm;
using namespace llvm::MachO;
-TEST(MachOTest, UnalignedLC) {
+#if BYTE_ORDER == BIG_ENDIAN
+// As discussed in Issue #86793, this test cannot work on a strict-alignment
+// targets like SPARC. Besides, it's undefined behaviour on big-endian hosts.
+#define MAYBE_UnalignedLC DISABLED_UnalignedLC
+#else
+#define MAYBE_UnalignedLC UnalignedLC
+#endif
+
+TEST(MachOTest, MAYBE_UnalignedLC) {
unsigned char Valid32BitMachO[] = {
0xCE, 0xFA, 0xED, 0xFE, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
diff --git a/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt b/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
index cc868e7..dc3380d 100644
--- a/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
+++ b/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
@@ -43,10 +43,10 @@ add_llvm_unittest(OrcJITTests
TaskDispatchTest.cpp
ThreadSafeModuleTest.cpp
WrapperFunctionUtilsTest.cpp
+
+ EXPORT_SYMBOLS
)
target_link_libraries(OrcJITTests PRIVATE
LLVMTestingSupport
${ORC_JIT_TEST_LIBS})
-
-export_executable_symbols(OrcJITTests)
diff --git a/llvm/unittests/IR/ConstantRangeTest.cpp b/llvm/unittests/IR/ConstantRangeTest.cpp
index 1705f3e..4815117 100644
--- a/llvm/unittests/IR/ConstantRangeTest.cpp
+++ b/llvm/unittests/IR/ConstantRangeTest.cpp
@@ -228,6 +228,12 @@ static bool CheckNonSignWrappedOnly(const ConstantRange &CR1,
return !CR1.isSignWrappedSet() && !CR2.isSignWrappedSet();
}
+static bool
+CheckNoSignedWrappedLHSAndNoWrappedRHSOnly(const ConstantRange &CR1,
+ const ConstantRange &CR2) {
+ return !CR1.isSignWrappedSet() && !CR2.isWrappedSet();
+}
+
static bool CheckNonWrappedOrSignWrappedOnly(const ConstantRange &CR1,
const ConstantRange &CR2) {
return !CR1.isWrappedSet() && !CR1.isSignWrappedSet() &&
@@ -1506,7 +1512,9 @@ TEST_F(ConstantRangeTest, ShlWithNoWrap) {
using OBO = OverflowingBinaryOperator;
TestBinaryOpExhaustive(
[](const ConstantRange &CR1, const ConstantRange &CR2) {
- return CR1.shlWithNoWrap(CR2, OBO::NoUnsignedWrap);
+ ConstantRange Res = CR1.shlWithNoWrap(CR2, OBO::NoUnsignedWrap);
+ EXPECT_TRUE(CR1.shl(CR2).contains(Res));
+ return Res;
},
[](const APInt &N1, const APInt &N2) -> std::optional<APInt> {
bool IsOverflow;
@@ -1515,7 +1523,7 @@ TEST_F(ConstantRangeTest, ShlWithNoWrap) {
return std::nullopt;
return Res;
},
- PreferSmallest, CheckCorrectnessOnly);
+ PreferSmallest, CheckNonWrappedOnly);
TestBinaryOpExhaustive(
[](const ConstantRange &CR1, const ConstantRange &CR2) {
return CR1.shlWithNoWrap(CR2, OBO::NoSignedWrap);
@@ -1527,7 +1535,7 @@ TEST_F(ConstantRangeTest, ShlWithNoWrap) {
return std::nullopt;
return Res;
},
- PreferSmallest, CheckCorrectnessOnly);
+ PreferSmallestSigned, CheckNoSignedWrappedLHSAndNoWrappedRHSOnly);
TestBinaryOpExhaustive(
[](const ConstantRange &CR1, const ConstantRange &CR2) {
return CR1.shlWithNoWrap(CR2, OBO::NoUnsignedWrap | OBO::NoSignedWrap);
@@ -1542,6 +1550,31 @@ TEST_F(ConstantRangeTest, ShlWithNoWrap) {
return Res1;
},
PreferSmallest, CheckCorrectnessOnly);
+
+ EXPECT_EQ(One.shlWithNoWrap(Full, OBO::NoSignedWrap),
+ ConstantRange(APInt(16, 10), APInt(16, 20481)));
+ EXPECT_EQ(One.shlWithNoWrap(Full, OBO::NoUnsignedWrap),
+ ConstantRange(APInt(16, 10), APInt(16, -24575)));
+ EXPECT_EQ(One.shlWithNoWrap(Full, OBO::NoSignedWrap | OBO::NoUnsignedWrap),
+ ConstantRange(APInt(16, 10), APInt(16, 20481)));
+ ConstantRange NegOne(APInt(16, 0xffff));
+ EXPECT_EQ(NegOne.shlWithNoWrap(Full, OBO::NoSignedWrap),
+ ConstantRange(APInt(16, -32768), APInt(16, 0)));
+ EXPECT_EQ(NegOne.shlWithNoWrap(Full, OBO::NoUnsignedWrap), NegOne);
+ EXPECT_EQ(ConstantRange(APInt(16, 768))
+ .shlWithNoWrap(Full, OBO::NoSignedWrap | OBO::NoUnsignedWrap),
+ ConstantRange(APInt(16, 768), APInt(16, 24577)));
+ EXPECT_EQ(Full.shlWithNoWrap(ConstantRange(APInt(16, 1), APInt(16, 16)),
+ OBO::NoUnsignedWrap),
+ ConstantRange(APInt(16, 0), APInt(16, -1)));
+ EXPECT_EQ(ConstantRange(APInt(4, 3), APInt(4, -8))
+ .shlWithNoWrap(ConstantRange(APInt(4, 0), APInt(4, 4)),
+ OBO::NoSignedWrap),
+ ConstantRange(APInt(4, 3), APInt(4, -8)));
+ EXPECT_EQ(ConstantRange(APInt(4, -1), APInt(4, 0))
+ .shlWithNoWrap(ConstantRange(APInt(4, 1), APInt(4, 4)),
+ OBO::NoSignedWrap),
+ ConstantRange(APInt(4, -8), APInt(4, -1)));
}
TEST_F(ConstantRangeTest, Lshr) {
diff --git a/llvm/unittests/Passes/Plugins/CMakeLists.txt b/llvm/unittests/Passes/Plugins/CMakeLists.txt
index e90cae1..55d7e71 100644
--- a/llvm/unittests/Passes/Plugins/CMakeLists.txt
+++ b/llvm/unittests/Passes/Plugins/CMakeLists.txt
@@ -6,8 +6,9 @@ if (NOT WIN32 AND NOT CYGWIN)
set(LLVM_LINK_COMPONENTS Support Passes Core AsmParser)
add_llvm_unittest(PluginsTests
PluginsTest.cpp
+
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
- export_executable_symbols_for_plugins(PluginsTests)
target_link_libraries(PluginsTests PRIVATE LLVMTestingSupport)
unset(LLVM_LINK_COMPONENTS)
diff --git a/llvm/unittests/ProfileData/PGOCtxProfReaderWriterTest.cpp b/llvm/unittests/ProfileData/PGOCtxProfReaderWriterTest.cpp
index 476f293..7be01445 100644
--- a/llvm/unittests/ProfileData/PGOCtxProfReaderWriterTest.cpp
+++ b/llvm/unittests/ProfileData/PGOCtxProfReaderWriterTest.cpp
@@ -64,7 +64,7 @@ public:
const std::map<GUID, const ContextNode *> &roots() const { return Roots; }
};
-void checkSame(const ContextNode &Raw, const PGOContextualProfile &Profile) {
+void checkSame(const ContextNode &Raw, const PGOCtxProfContext &Profile) {
EXPECT_EQ(Raw.guid(), Profile.guid());
ASSERT_EQ(Raw.counters_size(), Profile.counters().size());
for (auto I = 0U; I < Raw.counters_size(); ++I)
diff --git a/llvm/unittests/SandboxIR/SandboxIRTest.cpp b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
index 550c057..f4b2378 100644
--- a/llvm/unittests/SandboxIR/SandboxIRTest.cpp
+++ b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
@@ -1475,6 +1475,147 @@ define void @foo(ptr %ptr, <2 x ptr> %ptrs) {
EXPECT_EQ(NewGEP2->getNextNode(), nullptr);
}
+TEST_F(SandboxIRTest, AllocaInst) {
+ parseIR(C, R"IR(
+define void @foo() {
+ %allocaScalar = alloca i32, align 1024
+ %allocaArray = alloca i32, i32 42
+ ret void
+}
+)IR");
+ DataLayout DL(M.get());
+ llvm::Function &LLVMF = *M->getFunction("foo");
+ llvm::BasicBlock *LLVMBB = &*LLVMF.begin();
+ auto LLVMIt = LLVMBB->begin();
+ auto *LLVMAllocaScalar = cast<llvm::AllocaInst>(&*LLVMIt++);
+ auto *LLVMAllocaArray = cast<llvm::AllocaInst>(&*LLVMIt++);
+
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(&LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *AllocaScalar = cast<sandboxir::AllocaInst>(&*It++);
+ auto *AllocaArray = cast<sandboxir::AllocaInst>(&*It++);
+ auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
+
+ // Check isArrayAllocation().
+ EXPECT_EQ(AllocaScalar->isArrayAllocation(),
+ LLVMAllocaScalar->isArrayAllocation());
+ EXPECT_EQ(AllocaArray->isArrayAllocation(),
+ LLVMAllocaArray->isArrayAllocation());
+ // Check getArraySize().
+ EXPECT_EQ(AllocaScalar->getArraySize(),
+ Ctx.getValue(LLVMAllocaScalar->getArraySize()));
+ EXPECT_EQ(AllocaArray->getArraySize(),
+ Ctx.getValue(LLVMAllocaArray->getArraySize()));
+ // Check getType().
+ EXPECT_EQ(AllocaScalar->getType(), LLVMAllocaScalar->getType());
+ EXPECT_EQ(AllocaArray->getType(), LLVMAllocaArray->getType());
+ // Check getAddressSpace().
+ EXPECT_EQ(AllocaScalar->getAddressSpace(),
+ LLVMAllocaScalar->getAddressSpace());
+ EXPECT_EQ(AllocaArray->getAddressSpace(), LLVMAllocaArray->getAddressSpace());
+ // Check getAllocationSize().
+ EXPECT_EQ(AllocaScalar->getAllocationSize(DL),
+ LLVMAllocaScalar->getAllocationSize(DL));
+ EXPECT_EQ(AllocaArray->getAllocationSize(DL),
+ LLVMAllocaArray->getAllocationSize(DL));
+ // Check getAllocationSizeInBits().
+ EXPECT_EQ(AllocaScalar->getAllocationSizeInBits(DL),
+ LLVMAllocaScalar->getAllocationSizeInBits(DL));
+ EXPECT_EQ(AllocaArray->getAllocationSizeInBits(DL),
+ LLVMAllocaArray->getAllocationSizeInBits(DL));
+ // Check getAllocatedType().
+ EXPECT_EQ(AllocaScalar->getAllocatedType(),
+ LLVMAllocaScalar->getAllocatedType());
+ EXPECT_EQ(AllocaArray->getAllocatedType(),
+ LLVMAllocaArray->getAllocatedType());
+ // Check setAllocatedType().
+ auto *OrigType = AllocaScalar->getAllocatedType();
+ auto *NewType = PointerType::get(C, 0);
+ EXPECT_NE(NewType, OrigType);
+ AllocaScalar->setAllocatedType(NewType);
+ EXPECT_EQ(AllocaScalar->getAllocatedType(), NewType);
+ AllocaScalar->setAllocatedType(OrigType);
+ EXPECT_EQ(AllocaScalar->getAllocatedType(), OrigType);
+ // Check getAlign().
+ EXPECT_EQ(AllocaScalar->getAlign(), LLVMAllocaScalar->getAlign());
+ EXPECT_EQ(AllocaArray->getAlign(), LLVMAllocaArray->getAlign());
+ // Check setAlignment().
+ Align OrigAlign = AllocaScalar->getAlign();
+ Align NewAlign(16);
+ EXPECT_NE(NewAlign, OrigAlign);
+ AllocaScalar->setAlignment(NewAlign);
+ EXPECT_EQ(AllocaScalar->getAlign(), NewAlign);
+ AllocaScalar->setAlignment(OrigAlign);
+ EXPECT_EQ(AllocaScalar->getAlign(), OrigAlign);
+ // Check isStaticAlloca().
+ EXPECT_EQ(AllocaScalar->isStaticAlloca(), LLVMAllocaScalar->isStaticAlloca());
+ EXPECT_EQ(AllocaArray->isStaticAlloca(), LLVMAllocaArray->isStaticAlloca());
+ // Check isUsedWithInAlloca(), setUsedWithInAlloca().
+ EXPECT_EQ(AllocaScalar->isUsedWithInAlloca(),
+ LLVMAllocaScalar->isUsedWithInAlloca());
+ bool OrigUsedWithInAlloca = AllocaScalar->isUsedWithInAlloca();
+ bool NewUsedWithInAlloca = true;
+ EXPECT_NE(NewUsedWithInAlloca, OrigUsedWithInAlloca);
+ AllocaScalar->setUsedWithInAlloca(NewUsedWithInAlloca);
+ EXPECT_EQ(AllocaScalar->isUsedWithInAlloca(), NewUsedWithInAlloca);
+ AllocaScalar->setUsedWithInAlloca(OrigUsedWithInAlloca);
+ EXPECT_EQ(AllocaScalar->isUsedWithInAlloca(), OrigUsedWithInAlloca);
+
+ auto *Ty = Type::getInt32Ty(C);
+ unsigned AddrSpace = 42;
+ auto *PtrTy = PointerType::get(C, AddrSpace);
+ auto *ArraySize = sandboxir::Constant::createInt(Ty, 43, Ctx);
+ {
+ // Check create() WhereIt, WhereBB.
+ auto *NewI = cast<sandboxir::AllocaInst>(sandboxir::AllocaInst::create(
+ Ty, AddrSpace, /*WhereIt=*/Ret->getIterator(),
+ /*WhereBB=*/Ret->getParent(), Ctx, ArraySize, "NewAlloca1"));
+ // Check getOpcode().
+ EXPECT_EQ(NewI->getOpcode(), sandboxir::Instruction::Opcode::Alloca);
+ // Check getType().
+ EXPECT_EQ(NewI->getType(), PtrTy);
+ // Check getArraySize().
+ EXPECT_EQ(NewI->getArraySize(), ArraySize);
+ // Check getAddrSpace().
+ EXPECT_EQ(NewI->getAddressSpace(), AddrSpace);
+ // Check instr position.
+ EXPECT_EQ(NewI->getNextNode(), Ret);
+ }
+ {
+ // Check create() InsertBefore.
+ auto *NewI = cast<sandboxir::AllocaInst>(sandboxir::AllocaInst::create(
+ Ty, AddrSpace, /*InsertBefore=*/Ret, Ctx, ArraySize, "NewAlloca2"));
+ // Check getOpcode().
+ EXPECT_EQ(NewI->getOpcode(), sandboxir::Instruction::Opcode::Alloca);
+ // Check getType().
+ EXPECT_EQ(NewI->getType(), PtrTy);
+ // Check getArraySize().
+ EXPECT_EQ(NewI->getArraySize(), ArraySize);
+ // Check getAddrSpace().
+ EXPECT_EQ(NewI->getAddressSpace(), AddrSpace);
+ // Check instr position.
+ EXPECT_EQ(NewI->getNextNode(), Ret);
+ }
+ {
+ // Check create() InsertAtEnd.
+ auto *NewI = cast<sandboxir::AllocaInst>(sandboxir::AllocaInst::create(
+ Ty, AddrSpace, /*InsertAtEnd=*/BB, Ctx, ArraySize, "NewAlloca3"));
+ // Check getOpcode().
+ EXPECT_EQ(NewI->getOpcode(), sandboxir::Instruction::Opcode::Alloca);
+ // Check getType().
+ EXPECT_EQ(NewI->getType(), PtrTy);
+ // Check getArraySize().
+ EXPECT_EQ(NewI->getArraySize(), ArraySize);
+ // Check getAddrSpace().
+ EXPECT_EQ(NewI->getAddressSpace(), AddrSpace);
+ // Check instr position.
+ EXPECT_EQ(NewI->getParent(), BB);
+ EXPECT_EQ(NewI->getNextNode(), nullptr);
+ }
+}
+
TEST_F(SandboxIRTest, CastInst) {
parseIR(C, R"IR(
define void @foo(i32 %arg, float %farg, double %darg, ptr %ptr) {
@@ -1925,10 +2066,17 @@ bb1:
br label %bb2
bb2:
- %phi = phi i32 [ %arg, %bb1 ], [ 0, %bb2 ]
+ %phi = phi i32 [ %arg, %bb1 ], [ 0, %bb2 ], [ 1, %bb3 ], [ 2, %bb4 ], [ 3, %bb5 ]
br label %bb2
bb3:
+ br label %bb2
+
+bb4:
+ br label %bb2
+
+bb5:
+ br label %bb2
ret void
}
)IR");
@@ -2023,7 +2171,29 @@ bb3:
EXPECT_EQ(PHI->hasConstantOrUndefValue(), LLVMPHI->hasConstantOrUndefValue());
// Check isComplete().
EXPECT_EQ(PHI->isComplete(), LLVMPHI->isComplete());
-
+  // Check removeIncomingValueIf
+ EXPECT_EQ(PHI->getNumIncomingValues(), 5u);
+ auto *RemainBB0 = PHI->getIncomingBlock(0);
+ auto *RemoveBB0 = PHI->getIncomingBlock(1);
+ auto *RemainBB1 = PHI->getIncomingBlock(2);
+ auto *RemoveBB1 = PHI->getIncomingBlock(3);
+ auto *RemainBB2 = PHI->getIncomingBlock(4);
+ PHI->removeIncomingValueIf([&](unsigned Idx) {
+ return PHI->getIncomingBlock(Idx) == RemoveBB0 ||
+ PHI->getIncomingBlock(Idx) == RemoveBB1;
+ });
+ EXPECT_EQ(PHI->getNumIncomingValues(), 3u);
+ EXPECT_EQ(PHI->getIncomingBlock(0), RemainBB0);
+ EXPECT_EQ(PHI->getIncomingBlock(1), RemainBB1);
+ EXPECT_EQ(PHI->getIncomingBlock(2), RemainBB2);
+ // Check replaceIncomingBlockWith
+ OrigBB = RemainBB0;
+ auto *NewBB = RemainBB1;
+ EXPECT_NE(NewBB, OrigBB);
+ PHI->replaceIncomingBlockWith(OrigBB, NewBB);
+ EXPECT_EQ(PHI->getIncomingBlock(0), NewBB);
+ EXPECT_EQ(PHI->getIncomingBlock(1), RemainBB1);
+ EXPECT_EQ(PHI->getIncomingBlock(2), RemainBB2);
// Check create().
auto *NewPHI = cast<sandboxir::PHINode>(
sandboxir::PHINode::create(PHI->getType(), 0, Br, Ctx, "NewPHI"));
@@ -2037,3 +2207,29 @@ bb3:
}
EXPECT_EQ(NewPHI->getNumIncomingValues(), PHI->getNumIncomingValues());
}
+
+TEST_F(SandboxIRTest, UnreachableInst) {
+ parseIR(C, R"IR(
+define void @foo() {
+ unreachable
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *UI = cast<sandboxir::UnreachableInst>(&*It++);
+
+ EXPECT_EQ(UI->getNumSuccessors(), 0u);
+ EXPECT_EQ(UI->getNumOfIRInstrs(), 1u);
+ // Check create(InsertBefore)
+ sandboxir::UnreachableInst *NewUI =
+ sandboxir::UnreachableInst::create(/*InsertBefore=*/UI, Ctx);
+ EXPECT_EQ(NewUI->getNextNode(), UI);
+ // Check create(InsertAtEnd)
+ sandboxir::UnreachableInst *NewUIEnd =
+ sandboxir::UnreachableInst::create(/*InsertAtEnd=*/BB, Ctx);
+ EXPECT_EQ(NewUIEnd->getParent(), BB);
+ EXPECT_EQ(NewUIEnd->getNextNode(), nullptr);
+}
diff --git a/llvm/unittests/SandboxIR/TrackerTest.cpp b/llvm/unittests/SandboxIR/TrackerTest.cpp
index e612583..a8cf41a1 100644
--- a/llvm/unittests/SandboxIR/TrackerTest.cpp
+++ b/llvm/unittests/SandboxIR/TrackerTest.cpp
@@ -644,6 +644,51 @@ define void @foo(i8 %arg) {
EXPECT_EQ(Invoke->getSuccessor(1), ExceptionBB);
}
+TEST_F(TrackerTest, AllocaInstSetters) {
+ parseIR(C, R"IR(
+define void @foo(i8 %arg) {
+ %alloca = alloca i32, align 64
+ ret void
+}
+)IR");
+ Function &LLVMF = *M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ auto &F = *Ctx.createFunction(&LLVMF);
+ auto *BB = &*F.begin();
+ auto It = BB->begin();
+ auto *Alloca = cast<sandboxir::AllocaInst>(&*It++);
+
+ // Check setAllocatedType().
+ Ctx.save();
+ auto *OrigTy = Alloca->getAllocatedType();
+ auto *NewTy = Type::getInt64Ty(C);
+ EXPECT_NE(NewTy, OrigTy);
+ Alloca->setAllocatedType(NewTy);
+ EXPECT_EQ(Alloca->getAllocatedType(), NewTy);
+ Ctx.revert();
+ EXPECT_EQ(Alloca->getAllocatedType(), OrigTy);
+
+ // Check setAlignment().
+ Ctx.save();
+ auto OrigAlign = Alloca->getAlign();
+ Align NewAlign(128);
+ EXPECT_NE(NewAlign, OrigAlign);
+ Alloca->setAlignment(NewAlign);
+ EXPECT_EQ(Alloca->getAlign(), NewAlign);
+ Ctx.revert();
+ EXPECT_EQ(Alloca->getAlign(), OrigAlign);
+
+ // Check setUsedWithInAlloca().
+ Ctx.save();
+ auto OrigWIA = Alloca->isUsedWithInAlloca();
+ bool NewWIA = true;
+ EXPECT_NE(NewWIA, OrigWIA);
+ Alloca->setUsedWithInAlloca(NewWIA);
+ EXPECT_EQ(Alloca->isUsedWithInAlloca(), NewWIA);
+ Ctx.revert();
+ EXPECT_EQ(Alloca->isUsedWithInAlloca(), OrigWIA);
+}
+
TEST_F(TrackerTest, CallBrSetters) {
parseIR(C, R"IR(
define void @foo(i8 %arg) {
@@ -779,6 +824,15 @@ bb2:
EXPECT_EQ(PHI->getIncomingBlock(1), BB1);
EXPECT_EQ(PHI->getIncomingValue(1), Arg1);
+ // Check removeIncomingValueIf(FromBB1).
+ Ctx.save();
+ PHI->removeIncomingValueIf(
+ [&](unsigned Idx) { return PHI->getIncomingBlock(Idx) == BB1; });
+ EXPECT_EQ(PHI->getNumIncomingValues(), 1u);
+ Ctx.revert();
+ EXPECT_EQ(PHI->getNumIncomingValues(), 2u);
+ EXPECT_EQ(PHI->getIncomingBlock(0), BB0);
+ EXPECT_EQ(PHI->getIncomingBlock(1), BB1);
// Check removeIncomingValue() remove all.
Ctx.save();
PHI->removeIncomingValue(0u);
diff --git a/llvm/unittests/Support/CMakeLists.txt b/llvm/unittests/Support/CMakeLists.txt
index 631f2e6..db47a17 100644
--- a/llvm/unittests/Support/CMakeLists.txt
+++ b/llvm/unittests/Support/CMakeLists.txt
@@ -44,6 +44,7 @@ add_llvm_unittest(SupportTests
FileOutputBufferTest.cpp
FormatVariadicTest.cpp
FSUniqueIDTest.cpp
+ GenericDomTreeTest.cpp
GlobPatternTest.cpp
HashBuilderTest.cpp
IndexedAccessorTest.cpp
diff --git a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
index 4574acd..d8dff1e 100644
--- a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
+++ b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
@@ -17,9 +17,10 @@ set_output_directory(DynamicLibraryLib
add_llvm_unittest(DynamicLibraryTests
DynamicLibraryTest.cpp
+
+ EXPORT_SYMBOLS
)
target_link_libraries(DynamicLibraryTests PRIVATE DynamicLibraryLib)
-export_executable_symbols(DynamicLibraryTests)
function(dynlib_add_module NAME)
add_library(${NAME} MODULE
@@ -38,7 +39,7 @@ function(dynlib_add_module NAME)
)
add_dependencies(DynamicLibraryTests ${NAME})
-
+
if(LLVM_INTEGRATED_CRT_ALLOC)
# We need to link in the Support lib for the Memory allocator override,
# otherwise the DynamicLibrary.Shutdown test will fail, because it would
@@ -48,7 +49,7 @@ function(dynlib_add_module NAME)
llvm_map_components_to_libnames(llvm_libs Support)
target_link_libraries(${NAME} ${llvm_libs} "-INCLUDE:malloc")
endif()
-
+
endfunction(dynlib_add_module)
# Revert -Wl,-z,nodelete on this test since it relies on the file
diff --git a/llvm/unittests/Support/GenericDomTreeTest.cpp b/llvm/unittests/Support/GenericDomTreeTest.cpp
new file mode 100644
index 0000000..f0f87e3
--- /dev/null
+++ b/llvm/unittests/Support/GenericDomTreeTest.cpp
@@ -0,0 +1,109 @@
+//===- unittests/Support/GenericDomTreeTest.cpp - GenericDomTree.h tests --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/Support/DataTypes.h"
+#include "gtest/gtest.h"
+using namespace llvm;
+
+namespace {
+
+// Very simple (fake) graph structure to test dominator tree on.
+struct NumberedGraph;
+
+struct NumberedNode {
+ NumberedGraph *Parent;
+ unsigned Number;
+
+ NumberedNode(NumberedGraph *Parent, unsigned Number)
+ : Parent(Parent), Number(Number) {}
+
+ NumberedGraph *getParent() const { return Parent; }
+};
+
+struct NumberedGraph {
+ SmallVector<std::unique_ptr<NumberedNode>> Nodes;
+ unsigned NumberEpoch = 0;
+
+ NumberedNode *addNode() {
+ unsigned Num = Nodes.size();
+ return Nodes.emplace_back(std::make_unique<NumberedNode>(this, Num)).get();
+ }
+};
+} // namespace
+
+namespace llvm {
+template <> struct GraphTraits<NumberedNode *> {
+ using NodeRef = NumberedNode *;
+ static unsigned getNumber(NumberedNode *Node) { return Node->Number; }
+};
+
+template <> struct GraphTraits<const NumberedNode *> {
+ using NodeRef = NumberedNode *;
+ static unsigned getNumber(const NumberedNode *Node) { return Node->Number; }
+};
+
+template <> struct GraphTraits<NumberedGraph *> {
+ using NodeRef = NumberedNode *;
+ static unsigned getMaxNumber(NumberedGraph *G) { return G->Nodes.size(); }
+ static unsigned getNumberEpoch(NumberedGraph *G) { return G->NumberEpoch; }
+};
+
+namespace DomTreeBuilder {
+// Dummy specialization. Only needed so that we can call recalculate(), which
+// sets DT.Parent -- but we can't access DT.Parent here.
+template <> void Calculate(DomTreeBase<NumberedNode> &DT) {}
+} // end namespace DomTreeBuilder
+} // end namespace llvm
+
+namespace {
+
+TEST(GenericDomTree, BlockNumbers) {
+ NumberedGraph G;
+ NumberedNode *N1 = G.addNode();
+ NumberedNode *N2 = G.addNode();
+
+ DomTreeBase<NumberedNode> DT;
+ DT.recalculate(G); // only sets parent
+ // Construct fake domtree: node 0 dominates all other nodes
+ DT.setNewRoot(N1);
+ DT.addNewBlock(N2, N1);
+
+ // Roundtrip is correct
+ for (auto &N : G.Nodes)
+ EXPECT_EQ(DT.getNode(N.get())->getBlock(), N.get());
+ // If we manually change a number, we should get a different node.
+ ASSERT_EQ(N1->Number, 0u);
+ ASSERT_EQ(N2->Number, 1u);
+ N1->Number = 1;
+ EXPECT_EQ(DT.getNode(N1)->getBlock(), N2);
+ EXPECT_EQ(DT.getNode(N2)->getBlock(), N2);
+ N2->Number = 0;
+ EXPECT_EQ(DT.getNode(N2)->getBlock(), N1);
+
+  // Renumber blocks; this should fix the domtree-internal node map
+ DT.updateBlockNumbers();
+ for (auto &N : G.Nodes)
+ EXPECT_EQ(DT.getNode(N.get())->getBlock(), N.get());
+
+ // Adding a new node with a higher number is no problem
+ NumberedNode *N3 = G.addNode();
+ EXPECT_EQ(DT.getNode(N3), nullptr);
+ // ... even if it exceeds getMaxNumber()
+ NumberedNode *N4 = G.addNode();
+ N4->Number = 1000;
+ EXPECT_EQ(DT.getNode(N4), nullptr);
+
+ DT.addNewBlock(N3, N1);
+ DT.addNewBlock(N4, N1);
+ for (auto &N : G.Nodes)
+ EXPECT_EQ(DT.getNode(N.get())->getBlock(), N.get());
+}
+
+} // namespace
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index 3d55b03..9efebeb 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -1591,7 +1591,7 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC, AArch64::AEK_PERFMON}),
AArch64CPUTestParams(
"apple-m2", "armv8.6-a",
{AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
@@ -1600,7 +1600,7 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON}),
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC, AArch64::AEK_PERFMON}),
AArch64CPUTestParams(
"apple-a16", "armv8.6-a",
{AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
@@ -1609,7 +1609,8 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC, AArch64::AEK_PERFMON,
+ AArch64::AEK_HCX}),
AArch64CPUTestParams(
"apple-m3", "armv8.6-a",
{AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
@@ -1618,7 +1619,8 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC, AArch64::AEK_PERFMON,
+ AArch64::AEK_HCX}),
AArch64CPUTestParams(
"apple-a17", "armv8.6-a",
{AArch64::AEK_CRC, AArch64::AEK_AES, AArch64::AEK_SHA2,
@@ -1627,7 +1629,8 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_RCPC, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_SHA3, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT, AArch64::AEK_FCMA,
- AArch64::AEK_PAUTH, AArch64::AEK_PERFMON, AArch64::AEK_HCX}),
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC, AArch64::AEK_PERFMON,
+ AArch64::AEK_HCX}),
AArch64CPUTestParams("apple-m4", "armv9.2-a",
{AArch64::AEK_CRC, AArch64::AEK_AES,
AArch64::AEK_SHA2, AArch64::AEK_SHA3,
@@ -1637,10 +1640,10 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
AArch64::AEK_FP16FML, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_PAUTH, AArch64::AEK_FPAC,
+ AArch64::AEK_FCMA, AArch64::AEK_PERFMON,
AArch64::AEK_SME, AArch64::AEK_SME2,
- AArch64::AEK_SMEF64F64, AArch64::AEK_SMEI16I64,
- AArch64::AEK_PERFMON}),
+ AArch64::AEK_SMEF64F64, AArch64::AEK_SMEI16I64}),
AArch64CPUTestParams("exynos-m3", "armv8-a",
{AArch64::AEK_CRC, AArch64::AEK_AES,
AArch64::AEK_SHA2, AArch64::AEK_FP,
@@ -2005,7 +2008,7 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
AArch64::AEK_CPA, AArch64::AEK_PAUTHLR,
AArch64::AEK_TLBIW, AArch64::AEK_JSCVT,
AArch64::AEK_FCMA, AArch64::AEK_FP8,
-
+ AArch64::AEK_SVEB16B16,
};
std::vector<StringRef> Features;
@@ -2037,6 +2040,7 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
EXPECT_TRUE(llvm::is_contained(Features, "+spe"));
EXPECT_TRUE(llvm::is_contained(Features, "+ras"));
EXPECT_TRUE(llvm::is_contained(Features, "+sve"));
+ EXPECT_TRUE(llvm::is_contained(Features, "+sve-b16b16"));
EXPECT_TRUE(llvm::is_contained(Features, "+sve2"));
EXPECT_TRUE(llvm::is_contained(Features, "+sve2-aes"));
EXPECT_TRUE(llvm::is_contained(Features, "+sve2-sm4"));
@@ -2188,6 +2192,7 @@ TEST(TargetParserTest, AArch64ArchExtFeature) {
{"lse", "nolse", "+lse", "-lse"},
{"rdm", "nordm", "+rdm", "-rdm"},
{"sve", "nosve", "+sve", "-sve"},
+ {"sve-b16b16", "nosve-b16b16", "+sve-b16b16", "-sve-b16b16"},
{"sve2", "nosve2", "+sve2", "-sve2"},
{"sve2-aes", "nosve2-aes", "+sve2-aes", "-sve2-aes"},
{"sve2-sm4", "nosve2-sm4", "+sve2-sm4", "-sve2-sm4"},
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index bcba295..9b9686c 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -1706,9 +1706,8 @@ OperandMatcher &
InstructionMatcher::addOperand(unsigned OpIdx, const std::string &SymbolicName,
unsigned AllocatedTemporariesBaseID,
bool IsVariadic) {
- assert(Operands.empty() ||
- !Operands.back()->isVariadic() &&
- "Cannot add more operands after a variadic operand");
+ assert((Operands.empty() || !Operands.back()->isVariadic()) &&
+ "Cannot add more operands after a variadic operand");
Operands.emplace_back(new OperandMatcher(
*this, OpIdx, SymbolicName, AllocatedTemporariesBaseID, IsVariadic));
if (!SymbolicName.empty())
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 22330727..dce6c2a 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -1149,6 +1149,8 @@ void RegisterInfoEmitter::runTargetHeader(raw_ostream &OS,
<< " ArrayRef<const uint32_t *> getRegMasks() const override;\n"
<< " bool isGeneralPurposeRegister(const MachineFunction &, "
<< "MCRegister) const override;\n"
+ << " bool isGeneralPurposeRegisterClass(const TargetRegisterClass *RC)"
+ << " const override;\n"
<< " bool isFixedRegister(const MachineFunction &, "
<< "MCRegister) const override;\n"
<< " bool isArgumentRegister(const MachineFunction &, "
@@ -1740,6 +1742,20 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "}\n\n";
OS << "bool " << ClassName << "::\n"
+ << "isGeneralPurposeRegisterClass(const TargetRegisterClass *RC)"
+ << " const {\n"
+ << " return\n";
+ for (const CodeGenRegisterCategory &Category : RegCategories)
+ if (Category.getName() == "GeneralPurposeRegisters") {
+ for (const CodeGenRegisterClass *RC : Category.getClasses())
+ OS << " " << RC->getQualifiedName()
+ << "RegClass.hasSubClassEq(RC) ||\n";
+ break;
+ }
+ OS << " false;\n";
+ OS << "}\n\n";
+
+ OS << "bool " << ClassName << "::\n"
<< "isFixedRegister(const MachineFunction &MF, "
<< "MCRegister PhysReg) const {\n"
<< " return\n";
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index 1adefea..163a9dc 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -14,9 +14,11 @@
#include "Common/CodeGenSchedule.h"
#include "Common/CodeGenTarget.h"
#include "Common/PredicateExpander.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
@@ -31,8 +33,6 @@
#include <cassert>
#include <cstdint>
#include <iterator>
-#include <map>
-#include <set>
#include <string>
#include <vector>
@@ -259,8 +259,8 @@ unsigned SubtargetEmitter::FeatureKeyValues(
llvm::sort(FeatureList, LessRecordFieldName());
- // Check that there are no duplicate keys
- std::set<StringRef> UniqueKeys;
+ // Check that there are no duplicate features.
+ DenseMap<StringRef, const Record *> UniqueFeatures;
// Begin feature table
OS << "// Sorted (by key) array of values for CPU features.\n"
@@ -291,9 +291,12 @@ unsigned SubtargetEmitter::FeatureKeyValues(
OS << " },\n";
++NumFeatures;
- if (!UniqueKeys.insert(CommandLineName).second)
- PrintFatalError("Duplicate key in SubtargetFeatureKV: " +
- CommandLineName);
+ auto [It, Inserted] = UniqueFeatures.insert({CommandLineName, Feature});
+ if (!Inserted) {
+ PrintError(Feature, "Feature `" + CommandLineName + "` already defined.");
+ const Record *Previous = It->second;
+ PrintFatalNote(Previous, "Previous definition here.");
+ }
}
// End feature table
@@ -494,7 +497,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(
// operand cycles, and pipeline bypass tables. Then add the new Itinerary
// object with computed offsets to the ProcItinLists result.
unsigned StageCount = 1, OperandCycleCount = 1;
- std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
+ StringMap<unsigned> ItinStageMap, ItinOperandMap;
for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
// Add process itinerary to the list.
std::vector<InstrItinerary> &ItinList = ProcItinLists.emplace_back();
diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
index 10fdf14..684e124 100755
--- a/llvm/utils/extract_symbols.py
+++ b/llvm/utils/extract_symbols.py
@@ -140,7 +140,7 @@ def should_keep_itanium_symbol(symbol, calling_convention_decoration):
if not symbol.startswith("_") and not symbol.startswith("."):
return symbol
# Discard manglings that aren't nested names
- match = re.match("_Z(T[VTIS])?(N.+)", symbol)
+ match = re.match("\.?_Z(T[VTIS])?(N.+)", symbol)
if not match:
return None
# Demangle the name. If the name is too complex then we don't need to keep
@@ -323,7 +323,7 @@ def get_template_name(sym, mangling):
if mangling == "microsoft":
names = parse_microsoft_mangling(sym)
else:
- match = re.match("_Z(T[VTIS])?(N.+)", sym)
+ match = re.match("\.?_Z(T[VTIS])?(N.+)", sym)
if match:
names, _ = parse_itanium_nested_name(match.group(2))
else:
@@ -483,6 +483,9 @@ if __name__ == "__main__":
else:
outfile = sys.stdout
for k, v in list(symbol_defs.items()):
+ # On AIX, export function descriptors instead of function entries.
+ if platform.system() == "AIX" and k.startswith("."):
+ continue
template = get_template_name(k, args.mangling)
if v == 1 and (not template or template in template_instantiation_refs):
print(k, file=outfile)
diff --git a/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
index 576ab1d..d2cf524 100644
--- a/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
@@ -108,7 +108,6 @@ static_library("Basic") {
"Targets/DirectX.cpp",
"Targets/Hexagon.cpp",
"Targets/Lanai.cpp",
- "Targets/Le64.cpp",
"Targets/LoongArch.cpp",
"Targets/M68k.cpp",
"Targets/MSP430.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
index ce4bbf7..293c024 100644
--- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
@@ -136,8 +136,10 @@ copy("Headers") {
"armintr.h",
"avx10_2_512minmaxintrin.h",
"avx10_2_512niintrin.h",
+ "avx10_2_512satcvtintrin.h",
"avx10_2minmaxintrin.h",
"avx10_2niintrin.h",
+ "avx10_2satcvtintrin.h",
"avx2intrin.h",
"avx512bf16intrin.h",
"avx512bitalgintrin.h",
diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn
index b1b0dfb..1ec94a4 100644
--- a/llvm/utils/gn/secondary/clang/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn
@@ -65,8 +65,6 @@ write_lit_config("lit_site_cfg") {
"CMAKE_C_COMPILER=cc",
"ENABLE_BACKTRACES=1",
"ENABLE_SHARED=0",
- "LLVM_BUILD_EXAMPLES=0",
- "LLVM_BYE_LINK_INTO_TOOLS=0",
"LLVM_EXTERNAL_LIT=",
"LLVM_HOST_TRIPLE=$llvm_current_triple",
"LLVM_LIT_TOOLS_DIR=", # Intentionally empty, matches cmake build.
@@ -113,37 +111,26 @@ write_lit_config("lit_site_cfg") {
}
if (host_os == "mac") {
- extra_values += [
- "LLVM_PLUGIN_EXT=.dylib",
- "SHLIBEXT=.dylib",
- ]
+ extra_values += [ "LLVM_PLUGIN_EXT=.dylib" ]
} else if (host_os == "win") {
- extra_values += [
- "LLVM_PLUGIN_EXT=.dll",
- "SHLIBEXT=.dll",
- ]
+ extra_values += [ "LLVM_PLUGIN_EXT=.dll" ]
} else {
- extra_values += [
- "LLVM_PLUGIN_EXT=.so",
- "SHLIBEXT=.so",
- ]
+ extra_values += [ "LLVM_PLUGIN_EXT=.so" ]
}
if (host_os == "win") {
extra_values += [
# See comment for Windows solink in llvm/utils/gn/build/toolchain/BUILD.gn
"CMAKE_LIBRARY_OUTPUT_DIRECTORY=" + rebase_path("$root_out_dir/bin", dir),
- "LLVM_ENABLE_PLUGINS=1",
"LLVM_LIT_ERRC_MESSAGES=no such file or directory;is a directory;" +
"invalid argument;permission denied",
- "PERL_EXECUTABLE=",
+ "PERL_EXECUTABLE="
]
} else {
extra_values += [
"CMAKE_LIBRARY_OUTPUT_DIRECTORY=" + rebase_path("$root_out_dir/lib", dir),
- "LLVM_ENABLE_PLUGINS=1",
"LLVM_LIT_ERRC_MESSAGES=",
- "PERL_EXECUTABLE=/usr/bin/perl",
+ "PERL_EXECUTABLE=/usr/bin/perl"
]
}
diff --git a/llvm/utils/gn/secondary/lldb/include/lldb/Host/BUILD.gn b/llvm/utils/gn/secondary/lldb/include/lldb/Host/BUILD.gn
index c46a916..d05b236 100644
--- a/llvm/utils/gn/secondary/lldb/include/lldb/Host/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/include/lldb/Host/BUILD.gn
@@ -1,3 +1,4 @@
+import("//llvm/utils/gn/build/libs/curl/enable.gni")
import("//llvm/utils/gn/build/libs/xml/enable.gni")
import("//llvm/utils/gn/build/write_cmake_config.gni")
import("libedit.gni")
@@ -54,6 +55,12 @@ write_cmake_config("Config") {
]
}
+ if (llvm_enable_libcurl) {
+ values += [ "LLVM_ENABLE_CURL=1" ]
+ } else {
+ values += [ "LLVM_ENABLE_CURL=" ]
+ }
+
if (current_os == "win" || current_os == "linux" || current_os == "android") {
values += [ "HAVE_SYS_EVENT_H=" ]
} else {
diff --git a/llvm/utils/gn/secondary/lldb/source/Target/BUILD.gn b/llvm/utils/gn/secondary/lldb/source/Target/BUILD.gn
index 20a8bfd..fe8a3f5 100644
--- a/llvm/utils/gn/secondary/lldb/source/Target/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/source/Target/BUILD.gn
@@ -85,6 +85,7 @@ static_library("Target") {
"ThreadPlanCallUserExpression.cpp",
"ThreadPlanRunToAddress.cpp",
"ThreadPlanShouldStopHere.cpp",
+ "ThreadPlanSingleThreadTimeout.cpp",
"ThreadPlanStack.cpp",
"ThreadPlanStepInRange.cpp",
"ThreadPlanStepInstruction.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
index e47a5f7..ac3a3d8 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
@@ -36,6 +36,7 @@ static_library("Analysis") {
"ConstantFolding.cpp",
"ConstraintSystem.cpp",
"CostModel.cpp",
+ "CtxProfAnalysis.cpp",
"CycleAnalysis.cpp",
"DDG.cpp",
"DDGPrinter.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/RISCV/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/RISCV/BUILD.gn
index afd7fb5..39b321c 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/RISCV/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/RISCV/BUILD.gn
@@ -120,6 +120,7 @@ static_library("LLVMRISCVCodeGen") {
"RISCVGatherScatterLowering.cpp",
"RISCVISelDAGToDAG.cpp",
"RISCVISelLowering.cpp",
+ "RISCVIndirectBranchTracking.cpp",
"RISCVInsertReadWriteCSR.cpp",
"RISCVInsertVSETVLI.cpp",
"RISCVInsertWriteVXRM.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
index 4883845..3a660a8 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Support/BUILD.gn
@@ -47,6 +47,7 @@ unittest("SupportTests") {
"FileCollectorTest.cpp",
"FileOutputBufferTest.cpp",
"FormatVariadicTest.cpp",
+ "GenericDomTreeTest.cpp",
"GlobPatternTest.cpp",
"HashBuilderTest.cpp",
"IndexedAccessorTest.cpp",
diff --git a/mlir/docs/DefiningDialects/AttributesAndTypes.md b/mlir/docs/DefiningDialects/AttributesAndTypes.md
index d6941c0..1430edd 100644
--- a/mlir/docs/DefiningDialects/AttributesAndTypes.md
+++ b/mlir/docs/DefiningDialects/AttributesAndTypes.md
@@ -551,13 +551,13 @@ For Types, these methods will have the form:
- `static Type MyType::parse(AsmParser &parser)`
-- `Type MyType::print(AsmPrinter &p) const`
+- `void MyType::print(AsmPrinter &p) const`
For Attributes, these methods will have the form:
- `static Attribute MyAttr::parse(AsmParser &parser, Type attrType)`
-- `Attribute MyAttr::print(AsmPrinter &p) const`
+- `void MyAttr::print(AsmPrinter &p) const`
#### Using `assemblyFormat`
diff --git a/mlir/docs/Dialects/Vector.md b/mlir/docs/Dialects/Vector.md
index 399571b..ade0068 100644
--- a/mlir/docs/Dialects/Vector.md
+++ b/mlir/docs/Dialects/Vector.md
@@ -74,12 +74,30 @@ following top-down rewrites and conversions:
### LLVM level
On CPU, the `n-D` `vector` type currently lowers to `!llvm<array<vector>>`.
-More concretely, `vector<4x8x128xf32>` lowers to `!llvm<[4 x [ 8 x [ 128 x
-float ]]]>`. There are tradeoffs involved related to how one can access
-subvectors and how one uses `llvm.extractelement`, `llvm.insertelement` and
-`llvm.shufflevector`. The section on [LLVM Lowering
-Tradeoffs](#llvm-lowering-tradeoffs) offers a deeper dive into the current
-design choices and tradeoffs.
+More concretely,
+* `vector<4x8x128xf32>` lowers to `!llvm<[4 x [ 8 x < 128
+x float >]]>` (fixed-width vector), and
+* `vector<4x8x[128]xf32>` lowers to `!llvm<[4 x [ 8 x < vscale x 128
+x float >]]>` (scalable vector).
+
+There are tradeoffs involved related to how one can access subvectors and how
+one uses `llvm.extractelement`, `llvm.insertelement` and `llvm.shufflevector`.
+The section on [LLVM Lowering Tradeoffs](#llvm-lowering-tradeoffs) offers a
+deeper dive into the current design choices and tradeoffs.
+
+Note, while LLVM supports arrays of scalable vectors, these are required to be
+fixed-width arrays of 1-D scalable vectors. This means scalable vectors with a
+non-trailing scalable dimension (e.g. `vector<4x[8]x128xf32>`) are not
+convertible to LLVM.
+
+Finally, MLIR takes the same view on scalable Vectors as LLVM (c.f.
+[VectorType](https://llvm.org/docs/LangRef.html#vector-type)):
+> For scalable vectors, the total number of elements is a constant multiple
+> (called vscale) of the specified number of elements; vscale is a positive
+> integer that is unknown at compile time and the same hardware-dependent
+> constant for all scalable vectors at run time. The size of a specific
+> scalable vector type is thus constant within IR, even if the exact size in
+> bytes cannot be determined until run time.
### Hardware Vector Ops
@@ -269,11 +287,6 @@ proposal for now, this assumes LLVM only has built-in support for 1-D vector.
The relationship with the LLVM Matrix proposal is discussed at the end of this
document.
-MLIR does not currently support dynamic vector sizes (i.e. SVE style) so the
-discussion is limited to static rank and static vector sizes (e.g.
-`vector<4x8x16x32xf32>`). This section discusses operations on vectors in LLVM
-and MLIR.
-
LLVM instructions are prefixed by the `llvm.` dialect prefix (e.g.
`llvm.insertvalue`). Such ops operate exclusively on 1-D vectors and aggregates
following the [LLVM LangRef](https://llvm.org/docs/LangRef.html). MLIR
@@ -287,10 +300,11 @@ Consider a vector of rank n with static sizes `{s_0, ... s_{n-1}}` (i.e. an MLIR
`vector<s_0x...s_{n-1}xf32>`). Lowering such an `n-D` MLIR vector type to an
LLVM descriptor can be done by either:
-1. Flattening to a `1-D` vector: `!llvm<"(s_0*...*s_{n-1})xfloat">` in the MLIR
+1. Nested aggregate type of `1-D` vector:
+ `!llvm."[s_0x[s_1x[...<s_{n-1}xf32>]]]">` in the MLIR LLVM dialect (current
+ lowering in MLIR).
+2. Flattening to a `1-D` vector: `!llvm<"(s_0*...*s_{n-1})xfloat">` in the MLIR
LLVM dialect.
-2. Nested aggregate type of `1-D` vector:
- `!llvm."[s_0x[s_1x[...<s_{n-1}xf32>]]]">` in the MLIR LLVM dialect.
3. A mix of both.
There are multiple tradeoffs involved in choosing one or the other that we
@@ -303,9 +317,11 @@ vector<4x8x16x32xf32> to vector<4x4096xf32>` operation, that flattens the most
The first constraint was already mentioned: LLVM only supports `1-D` `vector`
types natively. Additional constraints are related to the difference in LLVM
-between vector and aggregate types: `“Aggregate Types are a subset of derived
-types that can contain multiple member types. Arrays and structs are aggregate
-types. Vectors are not considered to be aggregate types.”.`
+between vector and
+[aggregate types](https://llvm.org/docs/LangRef.html#aggregate-types):
+> Aggregate Types are a subset of derived types that can contain multiple
+> member types. Arrays and structs are aggregate types. Vectors are not
+> considered to be aggregate types.
This distinction is also reflected in some of the operations. For `1-D` vectors,
the operations `llvm.extractelement`, `llvm.insertelement`, and
@@ -314,12 +330,15 @@ vectors with `n>1`, and thus aggregate types at LLVM level, the more restrictive
operations `llvm.extractvalue` and `llvm.insertvalue` apply, which only accept
static indices. There is no direct shuffling support for aggregate types.
-The next sentence illustrates a recurrent tradeoff, also found in MLIR, between
+The next sentence (cf. LangRef [structure
+type](https://llvm.org/docs/LangRef.html#structure-type)) illustrates a
+recurrent tradeoff, also found in MLIR, between
“value types” (subject to SSA use-def chains) and “memory types” (subject to
-aliasing and side-effects): `“Structures in memory are accessed using ‘load’ and
-‘store’ by getting a pointer to a field with the llvm.getelementptr instruction.
-Structures in registers are accessed using the llvm.extractvalue and
-llvm.insertvalue instructions.”`
+aliasing and side-effects):
+> Structures in memory are accessed using ‘load’ and ‘store’ by getting a
+> pointer to a field with the llvm.getelementptr instruction. Structures in
+> registers are accessed using the llvm.extractvalue and llvm.insertvalue
+> instructions.
When transposing this to MLIR, `llvm.getelementptr` works on pointers to `n-D`
vectors in memory. For `n-D`, vectors values that live in registers we can use
diff --git a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
index 9178655..3f1776f 100644
--- a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
+++ b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
@@ -910,11 +910,11 @@ def FMopa2WayOp
The 2 outer products in the example above can be fused into a single outer
product as follows:
- ```mlir
- %a_packed = "llvm.intr.experimental.vector.interleave2"(%a0, %a1) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
- %b_packed = "llvm.intr.experimental.vector.interleave2"(%b0, %b1) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
+ ```mlir
+ %a_packed = vector.interleave %a0, %a1 : vector<[4]xf16> -> vector<[8]xf16>
+ %b_packed = vector.interleave %b0, %b1 : vector<[4]xf16> -> vector<[8]xf16>
%0 = arm_sme.fmopa_2way %a_packed, %b_packed : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
- ```
+ ```
This is implemented in the `-arm-sme-outer-product-fusion` pass.
@@ -1167,13 +1167,13 @@ def SMopa4WayOp
product as follows:
```mlir
- %lhs0 = "llvm.intr.experimental.vector.interleave2"(%a0, %a2) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
- %lhs1 = "llvm.intr.experimental.vector.interleave2"(%a1, %a3) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
- %lhs = "llvm.intr.experimental.vector.interleave2"(%lhs0, %lhs1) : (vector<[8]xi8>, vector<[8]xi8>) -> vector<[16]xi8>
+ %lhs0 = vector.interleave %a0, %a2 : vector<[4]xi8> -> vector<[8]xi8>
+ %lhs1 = vector.interleave %a1, %a3 : vector<[4]xi8> -> vector<[8]xi8>
+ %lhs = vector.interleave %lhs0, %lhs1 : vector<[8]xi8> -> vector<[16]xi8>
- %rhs0 = "llvm.intr.experimental.vector.interleave2"(%b0, %b2) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
- %rhs1 = "llvm.intr.experimental.vector.interleave2"(%b1, %b3) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
- %rhs = "llvm.intr.experimental.vector.interleave2"(%rhs0, %rhs1) : (vector<[8]xi8>, vector<[8]xi8>) -> vector<[16]xi8>
+ %rhs0 = vector.interleave %b0, %b2 : vector<[4]xi8> -> vector<[8]xi8>
+ %rhs1 = vector.interleave %b1, %b3 : vector<[4]xi8> -> vector<[8]xi8>
+ %rhs = vector.interleave %rhs0, %rhs1 : vector<[8]xi8> -> vector<[16]xi8>
%0 = arm_sme.smopa_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
```
diff --git a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
index 921234d..45efabf 100644
--- a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
@@ -180,7 +180,7 @@ def OuterProductFusion
https://mlir.llvm.org/docs/Dialects/ArmSME/#arm_smesmopa_4way-arm_smesmopa_4wayop
}];
let constructor = "mlir::arm_sme::createOuterProductFusionPass()";
- let dependentDialects = ["func::FuncDialect", "arm_sme::ArmSMEDialect", "LLVM::LLVMDialect"];
+ let dependentDialects = ["func::FuncDialect", "arm_sme::ArmSMEDialect"];
}
def VectorLegalization
diff --git a/mlir/include/mlir/Pass/PassRegistry.h b/mlir/include/mlir/Pass/PassRegistry.h
index 08874f0..f9cdee6 100644
--- a/mlir/include/mlir/Pass/PassRegistry.h
+++ b/mlir/include/mlir/Pass/PassRegistry.h
@@ -44,6 +44,9 @@ using PassAllocatorFunction = std::function<std::unique_ptr<Pass>()>;
// PassRegistry
//===----------------------------------------------------------------------===//
+/// Prints the passes that were previously registered and stored in passRegistry
+void printRegisteredPasses();
+
/// Structure to group information about a passes and pass pipelines (argument
/// to invoke via mlir-opt, description, pass pipeline builder).
class PassRegistryEntry {
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
index df3feb0..5364e2a 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
@@ -369,6 +369,10 @@ private:
ModuleOp mlirModule;
/// The LLVM module being imported.
std::unique_ptr<llvm::Module> llvmModule;
+ /// Nameless globals.
+ DenseMap<llvm::GlobalVariable *, FlatSymbolRefAttr> namelessGlobals;
+ /// Counter used to assign a unique ID to each nameless global.
+ unsigned namelessGlobalId = 0;
/// A dialect interface collection used for dispatching the import to specific
/// dialects.
diff --git a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
index d0ca188..ef976c1 100644
--- a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
+++ b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
@@ -119,6 +119,13 @@ public:
return success();
}
+ /// List the registered passes and return.
+ MlirOptMainConfig &listPasses(bool list) {
+ listPassesFlag = list;
+ return *this;
+ }
+ bool shouldListPasses() const { return listPassesFlag; }
+
/// Enable running the reproducer information stored in resources (if
/// present).
MlirOptMainConfig &runReproducer(bool enableReproducer) {
@@ -219,6 +226,9 @@ protected:
/// The callback to populate the pass manager.
std::function<LogicalResult(PassManager &)> passPipelineCallback;
+ /// List the registered passes and return.
+ bool listPassesFlag = false;
+
/// Enable running the reproducer.
bool runReproducerFlag = false;
diff --git a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
index 784deaa..17be4d9 100644
--- a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
@@ -509,8 +509,8 @@ Type LLVMTypeConverter::convertMemRefToBarePtr(BaseMemRefType type) const {
/// * 1-D `vector<axT>` remains as is while,
/// * n>1 `vector<ax...xkxT>` convert via an (n-1)-D array type to
/// `!llvm.array<ax...array<jxvector<kxT>>>`.
-/// Returns failure for n-D scalable vector types as LLVM does not support
-/// arrays of scalable vectors.
+/// As LLVM supports arrays of scalable vectors, this method will also convert
+/// n-D scalable vectors provided that only the trailing dim is scalable.
FailureOr<Type> LLVMTypeConverter::convertVectorType(VectorType type) const {
auto elementType = convertType(type.getElementType());
if (!elementType)
@@ -521,7 +521,9 @@ FailureOr<Type> LLVMTypeConverter::convertVectorType(VectorType type) const {
type.getScalableDims().back());
assert(LLVM::isCompatibleVectorType(vectorType) &&
"expected vector type compatible with the LLVM dialect");
- // Only the trailing dimension can be scalable.
+  // For n-D vector types for which a _non-trailing_ dim is scalable,
+  // return a failure. Supporting such cases would require LLVM
+  // to support something akin to "scalable arrays" of vectors.
if (llvm::is_contained(type.getScalableDims().drop_back(), true))
return failure();
auto shape = type.getShape();
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt b/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
index 8f9b508..a296244 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
@@ -14,7 +14,6 @@ add_mlir_dialect_library(MLIRArmSMETransforms
MLIRPass
MLIRArmSMEDialect
MLIRFuncDialect
- MLIRLLVMCommonConversion
MLIRVectorDialect
MLIRIndexDialect
MLIRSCFDialect
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
index 1e71167..ee1e374 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
@@ -15,7 +15,6 @@
#include "mlir/Dialect/ArmSME/Transforms/Passes.h"
#include "mlir/Dialect/ArmSME/Transforms/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
-#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/TypeSwitch.h"
@@ -80,15 +79,6 @@ static LogicalResult isCompatible(PatternRewriter &rewriter,
return success();
}
-// Create 'llvm.experimental.vector.interleave2' intrinsic from `lhs` and `rhs`.
-static Value createInterleave2Intrinsic(RewriterBase &rewriter, Location loc,
- Value lhs, Value rhs) {
- auto inputType = cast<VectorType>(lhs.getType());
- VectorType inputTypeX2 =
- VectorType::Builder(inputType).setDim(0, inputType.getShape()[0] * 2);
- return rewriter.create<LLVM::vector_interleave2>(loc, inputTypeX2, lhs, rhs);
-}
-
// Fuse two 'arm_sme.outerproduct' operations that are chained via the
// accumulator into 2-way outer product operation.
//
@@ -106,10 +96,8 @@ static Value createInterleave2Intrinsic(RewriterBase &rewriter, Location loc,
//
// Becomes:
//
-// %a_packed = "llvm.intr.experimental.vector.interleave2"(%a0, %a1)
-// : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
-// %b_packed = "llvm.intr.experimental.vector.interleave2"(%b0, %b1)
-// : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
+// %a_packed = vector.interleave %a0, %a1 : vector<[4]xf16> -> vector<[8]xf16>
+// %b_packed = vector.interleave %b0, %b1 : vector<[4]xf16> -> vector<[8]xf16>
// %0 = arm_sme.fmopa_2way %a_packed, %b_packed
// : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
class OuterProductFusion2Way
@@ -135,28 +123,7 @@ public:
if (!op1->hasOneUse()) {
// If the first outer product has uses other than as the input to another
- // outer product, it can't be erased after fusion. This is a problem when
- // it also has an accumulator as this will be used as the root for tile
- // allocation and since the widening outer product uses the same
- // accumulator it will get assigned the same tile ID, resulting in 3
- // outer products accumulating to the same tile and incorrect results.
- //
- // Example:
- //
- // %acc = arith.constant dense<0.0> ; root for tile allocation
- // %0 = arm_sme.outerproduct %a0, %b0 acc(%acc)
- // vector.print %0 ; intermediary use, can't erase %0
- // %1 = arm_sme.outerproduct %a1, %b1 acc(%0)
- //
- // After fusion and tile allocation
- //
- // %0 = arm_sme.zero {tile_id = 0 : i32}
- // %1 = arm_sme.outerproduct %a0, %b0 acc(%0) {tile_id = 0 : i32}
- // vector.print %1
- // %2 = arm_sme.fmopa_2way %a, %b acc(%0) {tile_id = 0 : i32}
- //
- // No accumulator would be ok, but it's simpler to prevent this
- // altogether, since it has no benefit.
+ // outer product, it can't be erased after fusion.
return rewriter.notifyMatchFailure(op,
kMatchFailureOuterProductNotSingleUse);
}
@@ -169,7 +136,7 @@ public:
auto loc = op.getLoc();
auto packInputs = [&](Value lhs, Value rhs) {
- return createInterleave2Intrinsic(rewriter, loc, lhs, rhs);
+ return rewriter.create<vector::InterleaveOp>(loc, lhs, rhs);
};
auto lhs = packInputs(op1.getLhs().getDefiningOp()->getOperand(0),
@@ -226,8 +193,6 @@ public:
llvm_unreachable("unexpected arm_sme::CombiningKind!");
}
- rewriter.eraseOp(op1);
-
return success();
}
@@ -319,7 +284,7 @@ public:
auto loc = op.getLoc();
auto packInputs = [&](Value lhs, Value rhs) {
- return createInterleave2Intrinsic(rewriter, loc, lhs, rhs);
+ return rewriter.create<vector::InterleaveOp>(loc, lhs, rhs);
};
auto lhs0 = packInputs(op1.getLhs().getDefiningOp()->getOperand(0),
@@ -400,10 +365,6 @@ public:
llvm_unreachable("unexpected arm_sme::CombiningKind!");
}
- rewriter.eraseOp(op3);
- rewriter.eraseOp(op2);
- rewriter.eraseOp(op1);
-
return success();
}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
index 954485c..5227b22 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
@@ -463,10 +463,15 @@ struct BufferDeallocationSimplificationPass
SplitDeallocWhenNotAliasingAnyOther,
RetainedMemrefAliasingAlwaysDeallocatedMemref>(&getContext(),
analysis);
+      // We don't want the block structure to change, invalidating the
+      // `BufferOriginAnalysis`, so we apply the rewrites with a `Normal` level
+      // of region simplification.
+ GreedyRewriteConfig config;
+ config.enableRegionSimplification = GreedySimplifyRegionLevel::Normal;
populateDeallocOpCanonicalizationPatterns(patterns, &getContext());
- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+ if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns),
+ config)))
signalPassFailure();
}
};
diff --git a/mlir/lib/Dialect/GPU/CMakeLists.txt b/mlir/lib/Dialect/GPU/CMakeLists.txt
index 8e4cef5..a596454 100644
--- a/mlir/lib/Dialect/GPU/CMakeLists.txt
+++ b/mlir/lib/Dialect/GPU/CMakeLists.txt
@@ -28,7 +28,7 @@ add_mlir_dialect_library(MLIRGPUTransforms
Transforms/AllReduceLowering.cpp
Transforms/AsyncRegionRewriter.cpp
Transforms/BufferDeallocationOpInterfaceImpl.cpp
- Transforms/DecomposeMemrefs.cpp
+ Transforms/DecomposeMemRefs.cpp
Transforms/EliminateBarriers.cpp
Transforms/GlobalIdRewriter.cpp
Transforms/KernelOutlining.cpp
diff --git a/mlir/lib/Dialect/GPU/Transforms/DecomposeMemrefs.cpp b/mlir/lib/Dialect/GPU/Transforms/DecomposeMemRefs.cpp
index 1e4c1fbc..2b2d10a 100644
--- a/mlir/lib/Dialect/GPU/Transforms/DecomposeMemrefs.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/DecomposeMemRefs.cpp
@@ -1,4 +1,4 @@
-//===- DecomposeMemrefs.cpp - Decompose memrefs pass implementation -------===//
+//===- DecomposeMemRefs.cpp - Decompose memrefs pass implementation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
index 3a799ce..2126147 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
@@ -1283,6 +1283,8 @@ public:
/// This only kicks in when VectorTransformsOptions is set to `Matmul`.
/// vector.transpose operations are inserted if the vector.contract op is not a
/// row-major matrix multiply.
+///
+/// Scalable vectors are not supported.
FailureOr<Value> ContractionOpToMatmulOpLowering::matchAndRewriteMaskableOp(
vector::ContractionOp op, MaskingOpInterface maskOp,
PatternRewriter &rew) const {
@@ -1302,13 +1304,18 @@ FailureOr<Value> ContractionOpToMatmulOpLowering::matchAndRewriteMaskableOp(
!isReductionIterator(iteratorTypes[2]))
return failure();
+ Type opResType = op.getType();
+ VectorType vecType = dyn_cast<VectorType>(opResType);
+ if (vecType && vecType.isScalable()) {
+ // Note - this is sufficient to reject all cases with scalable vectors.
+ return failure();
+ }
+
Type elementType = op.getLhsType().getElementType();
if (!elementType.isIntOrFloat())
return failure();
- Type dstElementType = op.getType();
- if (auto vecType = dyn_cast<VectorType>(dstElementType))
- dstElementType = vecType.getElementType();
+ Type dstElementType = vecType ? vecType.getElementType() : opResType;
if (elementType != dstElementType)
return failure();
diff --git a/mlir/lib/Pass/PassRegistry.cpp b/mlir/lib/Pass/PassRegistry.cpp
index 483cbe8..cb7c1558 100644
--- a/mlir/lib/Pass/PassRegistry.cpp
+++ b/mlir/lib/Pass/PassRegistry.cpp
@@ -68,6 +68,32 @@ static void printOptionHelp(StringRef arg, StringRef desc, size_t indent,
// PassRegistry
//===----------------------------------------------------------------------===//
+/// Prints the passes that were previously registered and stored in passRegistry
+void mlir::printRegisteredPasses() {
+ size_t maxWidth = 0;
+ for (auto &entry : *passRegistry)
+ maxWidth = std::max(maxWidth, entry.second.getOptionWidth() + 4);
+
+ // Functor used to print the ordered entries of a registration map.
+ auto printOrderedEntries = [&](StringRef header, auto &map) {
+ llvm::SmallVector<PassRegistryEntry *, 32> orderedEntries;
+ for (auto &kv : map)
+ orderedEntries.push_back(&kv.second);
+ llvm::array_pod_sort(
+ orderedEntries.begin(), orderedEntries.end(),
+ [](PassRegistryEntry *const *lhs, PassRegistryEntry *const *rhs) {
+ return (*lhs)->getPassArgument().compare((*rhs)->getPassArgument());
+ });
+
+ llvm::outs().indent(0) << header << ":\n";
+ for (PassRegistryEntry *entry : orderedEntries)
+ entry->printHelpStr(/*indent=*/2, maxWidth);
+ };
+
+ // Print the available passes.
+ printOrderedEntries("Passes", *passRegistry);
+}
+
/// Print the help information for this pass. This includes the argument,
/// description, and any pass options. `descIndent` is the indent that the
/// descriptions should be aligned.
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index af2f2cf..f283980 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Target/LLVMIR/ModuleImport.h"
+#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Target/LLVMIR/Import.h"
#include "AttrKindDetail.h"
@@ -73,6 +74,11 @@ static constexpr StringRef getGlobalCtorsVarName() {
return "llvm.global_ctors";
}
+/// Prefix used for symbols of nameless llvm globals.
+static constexpr StringRef getNamelessGlobalPrefix() {
+ return "mlir.llvm.nameless_global";
+}
+
/// Returns the name of the global_dtors global variables.
static constexpr StringRef getGlobalDtorsVarName() {
return "llvm.global_dtors";
@@ -884,9 +890,22 @@ LogicalResult ModuleImport::convertGlobal(llvm::GlobalVariable *globalVar) {
globalExpressionAttr =
debugImporter->translateGlobalVariableExpression(globalExpressions[0]);
+ // Workaround to support LLVM's nameless globals. MLIR, in contrast to LLVM,
+ // always requires a symbol name.
+ SmallString<128> globalName(globalVar->getName());
+ if (globalName.empty()) {
+ // Make sure the symbol name does not clash with an existing symbol.
+ globalName = SymbolTable::generateSymbolName<128>(
+ getNamelessGlobalPrefix(),
+ [this](StringRef newName) {
+ return llvmModule->getNamedValue(newName);
+ },
+ namelessGlobalId);
+ namelessGlobals[globalVar] = FlatSymbolRefAttr::get(context, globalName);
+ }
GlobalOp globalOp = builder.create<GlobalOp>(
mlirModule.getLoc(), type, globalVar->isConstant(),
- convertLinkageFromLLVM(globalVar->getLinkage()), globalVar->getName(),
+ convertLinkageFromLLVM(globalVar->getLinkage()), StringRef(globalName),
valueAttr, alignment, /*addr_space=*/globalVar->getAddressSpace(),
/*dso_local=*/globalVar->isDSOLocal(),
/*thread_local=*/globalVar->isThreadLocal(), /*comdat=*/SymbolRefAttr(),
@@ -1061,7 +1080,12 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
// Convert global variable accesses.
if (auto *globalVar = dyn_cast<llvm::GlobalVariable>(constant)) {
Type type = convertType(globalVar->getType());
- auto symbolRef = FlatSymbolRefAttr::get(context, globalVar->getName());
+ StringRef globalName = globalVar->getName();
+ FlatSymbolRefAttr symbolRef;
+ if (globalName.empty())
+ symbolRef = namelessGlobals[globalVar];
+ else
+ symbolRef = FlatSymbolRefAttr::get(context, globalName);
return builder.create<AddressOfOp>(loc, type, symbolRef).getResult();
}
diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
index 831e1e6..0b88d31 100644
--- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
+++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
@@ -30,6 +30,7 @@
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
+#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/Timing.h"
#include "mlir/Support/ToolUtilities.h"
@@ -118,6 +119,10 @@ struct MlirOptMainConfigCLOptions : public MlirOptMainConfig {
"parsing"),
cl::location(useExplicitModuleFlag), cl::init(false));
+ static cl::opt<bool, /*ExternalStorage=*/true> listPasses(
+ "list-passes", cl::desc("Print the list of registered passes and exit"),
+ cl::location(listPassesFlag), cl::init(false));
+
static cl::opt<bool, /*ExternalStorage=*/true> runReproducer(
"run-reproducer", cl::desc("Run the pipeline stored in the reproducer"),
cl::location(runReproducerFlag), cl::init(false));
@@ -522,6 +527,11 @@ static LogicalResult printRegisteredDialects(DialectRegistry &registry) {
return success();
}
+static LogicalResult printRegisteredPassesAndReturn() {
+ mlir::printRegisteredPasses();
+ return success();
+}
+
LogicalResult mlir::MlirOptMain(llvm::raw_ostream &outputStream,
std::unique_ptr<llvm::MemoryBuffer> buffer,
DialectRegistry &registry,
@@ -529,6 +539,9 @@ LogicalResult mlir::MlirOptMain(llvm::raw_ostream &outputStream,
if (config.shouldShowDialects())
return printRegisteredDialects(registry);
+ if (config.shouldListPasses())
+ return printRegisteredPassesAndReturn();
+
// The split-input-file mode is a very specific mode that slices the file
// up into small pieces and checks each independently.
// We use an explicit threadpool to avoid creating and joining/destroying
@@ -565,6 +578,9 @@ LogicalResult mlir::MlirOptMain(int argc, char **argv,
if (config.shouldShowDialects())
return printRegisteredDialects(registry);
+ if (config.shouldListPasses())
+ return printRegisteredPassesAndReturn();
+
// When reading from stdin and the input is a tty, it is often a user mistake
// and the process "appears to be stuck". Print a message to let the user know
// about it!
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 4c0f15b..dd75e28 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -9,6 +9,7 @@
#include "mlir/Transforms/RegionUtils.h"
#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Block.h"
+#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
@@ -16,11 +17,15 @@
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include <deque>
+#include <iterator>
using namespace mlir;
@@ -674,6 +679,94 @@ static bool ableToUpdatePredOperands(Block *block) {
return true;
}
+/// Prunes the redundant list of new arguments. E.g., if we are passing an
+/// argument list like [x, y, z, x] this would return [x, y, z] and it would
+/// update the `block` (to which the arguments are passed) accordingly. The new
+/// arguments are passed as arguments at the back of the block, hence we need to
+/// know how many `numOldArguments` were before, in order to correctly replace
+/// the new arguments in the block
+static SmallVector<SmallVector<Value, 8>, 2> pruneRedundantArguments(
+ const SmallVector<SmallVector<Value, 8>, 2> &newArguments,
+ RewriterBase &rewriter, unsigned numOldArguments, Block *block) {
+
+ SmallVector<SmallVector<Value, 8>, 2> newArgumentsPruned(
+ newArguments.size(), SmallVector<Value, 8>());
+
+ if (newArguments.empty())
+ return newArguments;
+
+ // `newArguments` is a 2D array of size `numLists` x `numArgs`
+ unsigned numLists = newArguments.size();
+ unsigned numArgs = newArguments[0].size();
+
+ // Map that for each arg index contains the index that we can use in place of
+ // the original index. E.g., if we have newArgs = [x, y, z, x], we will have
+ // idxToReplacement[3] = 0
+ llvm::DenseMap<unsigned, unsigned> idxToReplacement;
+
+ // This is a useful data structure to track the first appearance of a Value
+ // on a given list of arguments
+ DenseMap<Value, unsigned> firstValueToIdx;
+ for (unsigned j = 0; j < numArgs; ++j) {
+ Value newArg = newArguments[0][j];
+ if (!firstValueToIdx.contains(newArg))
+ firstValueToIdx[newArg] = j;
+ }
+
+ // Go through the first list of arguments (list 0).
+ for (unsigned j = 0; j < numArgs; ++j) {
+ // Look back to see if there are possible redundancies in list 0. Please
+ // note that we are using a map to annotate when an argument was seen first
+ // to avoid a O(N^2) algorithm. This has the drawback that if we have two
+ // lists like:
+ // list0: [%a, %a, %a]
+ // list1: [%c, %b, %b]
+ // We cannot simplify it, because firstValueToIdx[%a] = 0, but we cannot
+ // point list1[1](==%b) or list1[2](==%b) to list1[0](==%c). However, since
+ // the number of arguments can be potentially unbounded we cannot afford a
+ // O(N^2) algorithm (to search to all the possible pairs) and we need to
+ // accept the trade-off.
+ unsigned k = firstValueToIdx[newArguments[0][j]];
+ if (k == j)
+ continue;
+
+ bool shouldReplaceJ = true;
+ unsigned replacement = k;
+ // If a possible redundancy is found, then scan the other lists: we
+ // can prune the arguments if and only if they are redundant in every
+ // list.
+ for (unsigned i = 1; i < numLists; ++i)
+ shouldReplaceJ =
+ shouldReplaceJ && (newArguments[i][k] == newArguments[i][j]);
+ // Save the replacement.
+ if (shouldReplaceJ)
+ idxToReplacement[j] = replacement;
+ }
+
+ // Populate the pruned argument list.
+ for (unsigned i = 0; i < numLists; ++i)
+ for (unsigned j = 0; j < numArgs; ++j)
+ if (!idxToReplacement.contains(j))
+ newArgumentsPruned[i].push_back(newArguments[i][j]);
+
+ // Replace the block's redundant arguments.
+ SmallVector<unsigned> toErase;
+ for (auto [idx, arg] : llvm::enumerate(block->getArguments())) {
+ if (idxToReplacement.contains(idx)) {
+ Value oldArg = block->getArgument(numOldArguments + idx);
+ Value newArg =
+ block->getArgument(numOldArguments + idxToReplacement[idx]);
+ rewriter.replaceAllUsesWith(oldArg, newArg);
+ toErase.push_back(numOldArguments + idx);
+ }
+ }
+
+ // Erase the block's redundant arguments.
+ for (unsigned idxToErase : llvm::reverse(toErase))
+ block->eraseArgument(idxToErase);
+ return newArgumentsPruned;
+}
+
LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
// Don't consider clusters that don't have blocks to merge.
if (blocksToMerge.empty())
@@ -703,6 +796,7 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
1 + blocksToMerge.size(),
SmallVector<Value, 8>(operandsToMerge.size()));
unsigned curOpIndex = 0;
+ unsigned numOldArguments = leaderBlock->getNumArguments();
for (const auto &it : llvm::enumerate(operandsToMerge)) {
unsigned nextOpOffset = it.value().first - curOpIndex;
curOpIndex = it.value().first;
@@ -722,6 +816,11 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
}
}
}
+
+ // Prune redundant arguments and update the leader block argument list
+ newArguments = pruneRedundantArguments(newArguments, rewriter,
+ numOldArguments, leaderBlock);
+
// Update the predecessors for each of the blocks.
auto updatePredecessors = [&](Block *block, unsigned clusterIndex) {
for (auto predIt = block->pred_begin(), predE = block->pred_end();
@@ -818,6 +917,111 @@ static LogicalResult mergeIdenticalBlocks(RewriterBase &rewriter,
return success(anyChanged);
}
+/// If a block's argument is always the same across different invocations, then
+/// drop the argument and use the value directly inside the block
+static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
+ Block &block) {
+ SmallVector<size_t> argsToErase;
+
+ // Go through the arguments of the block.
+ for (auto [argIdx, blockOperand] : llvm::enumerate(block.getArguments())) {
+ bool sameArg = true;
+ Value commonValue;
+
+ // Go through the block predecessor and flag if they pass to the block
+ // different values for the same argument.
+ for (Block::pred_iterator predIt = block.pred_begin(),
+ predE = block.pred_end();
+ predIt != predE; ++predIt) {
+ auto branch = dyn_cast<BranchOpInterface>((*predIt)->getTerminator());
+ if (!branch) {
+ sameArg = false;
+ break;
+ }
+ unsigned succIndex = predIt.getSuccessorIndex();
+ SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
+ auto branchOperands = succOperands.getForwardedOperands();
+ if (!commonValue) {
+ commonValue = branchOperands[argIdx];
+ continue;
+ }
+ if (branchOperands[argIdx] != commonValue) {
+ sameArg = false;
+ break;
+ }
+ }
+
+ // If they are passing the same value, drop the argument.
+ if (commonValue && sameArg) {
+ argsToErase.push_back(argIdx);
+
+ // Remove the argument from the block.
+ rewriter.replaceAllUsesWith(blockOperand, commonValue);
+ }
+ }
+
+ // Remove the arguments.
+ for (size_t argIdx : llvm::reverse(argsToErase)) {
+ block.eraseArgument(argIdx);
+
+ // Remove the argument from the branch ops.
+ for (auto predIt = block.pred_begin(), predE = block.pred_end();
+ predIt != predE; ++predIt) {
+ auto branch = cast<BranchOpInterface>((*predIt)->getTerminator());
+ unsigned succIndex = predIt.getSuccessorIndex();
+ SuccessorOperands succOperands = branch.getSuccessorOperands(succIndex);
+ succOperands.erase(argIdx);
+ }
+ }
+ return success(!argsToErase.empty());
+}
+
+/// This optimization drops redundant arguments to blocks. I.e., if a given
+/// argument to a block receives the same value from each of the block
+/// predecessors, we can remove the argument from the block and use directly the
+/// original value. This is a simple example:
+///
+/// %cond = llvm.call @rand() : () -> i1
+/// %val0 = llvm.mlir.constant(1 : i64) : i64
+/// %val1 = llvm.mlir.constant(2 : i64) : i64
+/// %val2 = llvm.mlir.constant(3 : i64) : i64
+/// llvm.cond_br %cond, ^bb1(%val0 : i64, %val1 : i64), ^bb2(%val0 : i64, %val2
+/// : i64)
+///
+/// ^bb1(%arg0 : i64, %arg1 : i64):
+/// llvm.call @foo(%arg0, %arg1)
+///
+/// The previous IR can be rewritten as:
+/// %cond = llvm.call @rand() : () -> i1
+/// %val0 = llvm.mlir.constant(1 : i64) : i64
+/// %val1 = llvm.mlir.constant(2 : i64) : i64
+/// %val2 = llvm.mlir.constant(3 : i64) : i64
+/// llvm.cond_br %cond, ^bb1(%val1 : i64), ^bb2(%val2 : i64)
+///
+/// ^bb1(%arg0 : i64):
+/// llvm.call @foo(%val0, %arg0)
+///
+static LogicalResult dropRedundantArguments(RewriterBase &rewriter,
+ MutableArrayRef<Region> regions) {
+ llvm::SmallSetVector<Region *, 1> worklist;
+ for (Region &region : regions)
+ worklist.insert(&region);
+ bool anyChanged = false;
+ while (!worklist.empty()) {
+ Region *region = worklist.pop_back_val();
+
+ // Add any nested regions to the worklist.
+ for (Block &block : *region) {
+ anyChanged = succeeded(dropRedundantArguments(rewriter, block));
+
+ for (Operation &op : block)
+ for (Region &nestedRegion : op.getRegions())
+ worklist.insert(&nestedRegion);
+ }
+ }
+ return success(anyChanged);
+}
+
//===----------------------------------------------------------------------===//
// Region Simplification
//===----------------------------------------------------------------------===//
@@ -832,8 +1036,12 @@ LogicalResult mlir::simplifyRegions(RewriterBase &rewriter,
bool eliminatedBlocks = succeeded(eraseUnreachableBlocks(rewriter, regions));
bool eliminatedOpsOrArgs = succeeded(runRegionDCE(rewriter, regions));
bool mergedIdenticalBlocks = false;
- if (mergeBlocks)
+ bool droppedRedundantArguments = false;
+ if (mergeBlocks) {
mergedIdenticalBlocks = succeeded(mergeIdenticalBlocks(rewriter, regions));
+ droppedRedundantArguments =
+ succeeded(dropRedundantArguments(rewriter, regions));
+ }
return success(eliminatedBlocks || eliminatedOpsOrArgs ||
- mergedIdenticalBlocks);
+ mergedIdenticalBlocks || droppedRedundantArguments);
}
diff --git a/mlir/python/requirements.txt b/mlir/python/requirements.txt
index 6ec63e4..d1b5418 100644
--- a/mlir/python/requirements.txt
+++ b/mlir/python/requirements.txt
@@ -1,4 +1,4 @@
numpy>=1.19.5, <=1.26
pybind11>=2.9.0, <=2.10.3
-PyYAML>=5.3.1, <=6.0.1
-ml_dtypes # provides several NumPy dtype extensions, including the bf16 \ No newline at end of file
+PyYAML>=5.4.0, <=6.0.1
+ml_dtypes>=0.1.0, <=0.4.0 # provides several NumPy dtype extensions, including the bf16
diff --git a/mlir/test/Dialect/ArmSME/outer-product-fusion.mlir b/mlir/test/Dialect/ArmSME/outer-product-fusion.mlir
index 4887d61..9000551 100644
--- a/mlir/test/Dialect/ArmSME/outer-product-fusion.mlir
+++ b/mlir/test/Dialect/ArmSME/outer-product-fusion.mlir
@@ -4,10 +4,10 @@
// CHECK-SAME: %[[A0:.*]]: vector<[4]xf16>, %[[B0:.*]]: vector<[4]xf16>, %[[A1:.*]]: vector<[4]xf16>, %[[B1:.*]]: vector<[4]xf16>,
// CHECK-SAME: %[[A0_MASK:.*]]: vector<[4]xi1>, %[[B0_MASK:.*]]: vector<[4]xi1>, %[[A1_MASK:.*]]: vector<[4]xi1>, %[[B1_MASK:.*]]: vector<[4]xi1>
// CHECK-DAG: %[[ACC:.*]] = arith.constant dense<0.000000e+00> : vector<[4]x[4]xf32>
-// CHECK-DAG: %[[LHS:.*]] = "llvm.intr.vector.interleave2"(%[[A0]], %[[A1]]) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
-// CHECK-DAG: %[[RHS:.*]] = "llvm.intr.vector.interleave2"(%[[B0]], %[[B1]]) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
-// CHECK-DAG: %[[LHS_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[A0_MASK]], %[[A1_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
-// CHECK-DAG: %[[RHS_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[B0_MASK]], %[[B1_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
+// CHECK-DAG: %[[LHS:.*]] = vector.interleave %[[A0]], %[[A1]] : vector<[4]xf16> -> vector<[8]xf16>
+// CHECK-DAG: %[[RHS:.*]] = vector.interleave %[[B0]], %[[B1]] : vector<[4]xf16> -> vector<[8]xf16>
+// CHECK-DAG: %[[LHS_MASK:.*]] = vector.interleave %[[A0_MASK]], %[[A1_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
+// CHECK-DAG: %[[RHS_MASK:.*]] = vector.interleave %[[B0_MASK]], %[[B1_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
// CHECK-DAG: arm_sme.fmopa_2way %[[LHS]], %[[RHS]] acc(%[[ACC]]) masks(%[[LHS_MASK]], %[[RHS_MASK]]) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
func.func @outerproduct_add_widening_2way_f16f16f32(
%a0 : vector<[4]xf16>, %b0 : vector<[4]xf16>,
@@ -225,18 +225,18 @@ func.func @outerproduct_sub_widening_2way_unsigned_i16i16i32(
// CHECK-SAME: %[[A2_MASK:[a-z0-9]+]]: vector<[4]xi1>, %[[B2_MASK:[a-z0-9]+]]: vector<[4]xi1>,
// CHECK-SAME: %[[A3_MASK:[a-z0-9]+]]: vector<[4]xi1>, %[[B3_MASK:[a-z0-9]+]]: vector<[4]xi1>
// CHECK-DAG: %[[ACC:.*]] = arith.constant dense<0> : vector<[4]x[4]xi32>
-// CHECK-DAG: %[[LHS0:.*]] = "llvm.intr.vector.interleave2"(%[[A0]], %[[A2]]) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
-// CHECK-DAG: %[[LHS1:.*]] = "llvm.intr.vector.interleave2"(%[[A1]], %[[A3]]) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
-// CHECK-DAG: %[[RHS0:.*]] = "llvm.intr.vector.interleave2"(%[[B0]], %[[B2]]) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
-// CHECK-DAG: %[[RHS1:.*]] = "llvm.intr.vector.interleave2"(%[[B1]], %[[B3]]) : (vector<[4]xi8>, vector<[4]xi8>) -> vector<[8]xi8>
-// CHECK-DAG: %[[LHS:.*]] = "llvm.intr.vector.interleave2"(%[[LHS0]], %[[LHS1]]) : (vector<[8]xi8>, vector<[8]xi8>) -> vector<[16]xi8>
-// CHECK-DAG: %[[RHS:.*]] = "llvm.intr.vector.interleave2"(%[[RHS0]], %[[RHS1]]) : (vector<[8]xi8>, vector<[8]xi8>) -> vector<[16]xi8>
-// CHECK-DAG: %[[LHS0_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[A0_MASK]], %[[A2_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
-// CHECK-DAG: %[[LHS1_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[A1_MASK]], %[[A3_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
-// CHECK-DAG: %[[RHS0_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[B0_MASK]], %[[B2_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
-// CHECK-DAG: %[[RHS1_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[B1_MASK]], %[[B3_MASK]]) : (vector<[4]xi1>, vector<[4]xi1>) -> vector<[8]xi1>
-// CHECK-DAG: %[[LHS_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[LHS0_MASK]], %[[LHS1_MASK]]) : (vector<[8]xi1>, vector<[8]xi1>) -> vector<[16]xi1>
-// CHECK-DAG: %[[RHS_MASK:.*]] = "llvm.intr.vector.interleave2"(%[[RHS0_MASK]], %[[RHS1_MASK]]) : (vector<[8]xi1>, vector<[8]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[LHS0:.*]] = vector.interleave %[[A0]], %[[A2]] : vector<[4]xi8> -> vector<[8]xi8>
+// CHECK-DAG: %[[LHS1:.*]] = vector.interleave %[[A1]], %[[A3]] : vector<[4]xi8> -> vector<[8]xi8>
+// CHECK-DAG: %[[RHS0:.*]] = vector.interleave %[[B0]], %[[B2]] : vector<[4]xi8> -> vector<[8]xi8>
+// CHECK-DAG: %[[RHS1:.*]] = vector.interleave %[[B1]], %[[B3]] : vector<[4]xi8> -> vector<[8]xi8>
+// CHECK-DAG: %[[LHS:.*]] = vector.interleave %[[LHS0]], %[[LHS1]] : vector<[8]xi8> -> vector<[16]xi8>
+// CHECK-DAG: %[[RHS:.*]] = vector.interleave %[[RHS0]], %[[RHS1]] : vector<[8]xi8> -> vector<[16]xi8>
+// CHECK-DAG: %[[LHS0_MASK:.*]] = vector.interleave %[[A0_MASK]], %[[A2_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
+// CHECK-DAG: %[[LHS1_MASK:.*]] = vector.interleave %[[A1_MASK]], %[[A3_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
+// CHECK-DAG: %[[RHS0_MASK:.*]] = vector.interleave %[[B0_MASK]], %[[B2_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
+// CHECK-DAG: %[[RHS1_MASK:.*]] = vector.interleave %[[B1_MASK]], %[[B3_MASK]] : vector<[4]xi1> -> vector<[8]xi1>
+// CHECK-DAG: %[[LHS_MASK:.*]] = vector.interleave %[[LHS0_MASK]], %[[LHS1_MASK]] : vector<[8]xi1> -> vector<[16]xi1>
+// CHECK-DAG: %[[RHS_MASK:.*]] = vector.interleave %[[RHS0_MASK]], %[[RHS1_MASK]] : vector<[8]xi1> -> vector<[16]xi1>
// CHECK-DAG: arm_sme.smopa_4way %[[LHS]], %[[RHS]] acc(%[[ACC]]) masks(%[[LHS_MASK]], %[[RHS_MASK]]) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
func.func @outerproduct_add_widening_4way_signed_i8i8i32(
%a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
diff --git a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
index 5e8104f..8e14990 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-branchop-interface.mlir
@@ -178,7 +178,7 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: ^bb1
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ARG1]], %false{{[0-9_]*}} :
+// CHECK: cf.br ^bb6([[ARG1]], %false{{[0-9_]*}} :
// CHECK: ^bb2([[IDX:%.*]]:{{.*}})
// CHECK: [[ALLOC1:%.*]] = memref.alloc([[IDX]])
// CHECK-NEXT: test.buffer_based
@@ -186,20 +186,24 @@ func.func @condBranchDynamicTypeNested(
// CHECK-NEXT: [[OWN:%.+]] = arith.select [[ARG0]], [[ARG0]], [[NOT_ARG0]]
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb3
+// CHECK: cf.cond_br{{.*}}, ^bb3, ^bb4
// CHECK-NEXT: ^bb3:
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb4([[ALLOC1]], [[OWN]]
-// CHECK-NEXT: ^bb4([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
+// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
+// CHECK-NEXT: ^bb4:
// CHECK-NOT: bufferization.dealloc
// CHECK-NOT: bufferization.clone
-// CHECK: cf.br ^bb5([[ALLOC2]], [[COND1]]
-// CHECK-NEXT: ^bb5([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
+// CHECK: cf.br ^bb5([[ALLOC1]], [[OWN]]
+// CHECK-NEXT: ^bb5([[ALLOC2:%.*]]:{{.*}}, [[COND1:%.+]]:{{.*}})
+// CHECK-NOT: bufferization.dealloc
+// CHECK-NOT: bufferization.clone
+// CHECK: cf.br ^bb6([[ALLOC2]], [[COND1]]
+// CHECK-NEXT: ^bb6([[ALLOC4:%.*]]:{{.*}}, [[COND2:%.+]]:{{.*}})
// CHECK-NEXT: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC4]]
// CHECK-NEXT: [[OWN:%.+]]:2 = bufferization.dealloc ([[BASE]] :{{.*}}) if ([[COND2]]) retain ([[ALLOC4]], [[ARG2]] :
-// CHECK: cf.br ^bb6([[ALLOC4]], [[OWN]]#0
-// CHECK-NEXT: ^bb6([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
+// CHECK: cf.br ^bb7([[ALLOC4]], [[OWN]]#0
+// CHECK-NEXT: ^bb7([[ALLOC5:%.*]]:{{.*}}, [[COND3:%.+]]:{{.*}})
// CHECK: test.copy
// CHECK: [[BASE:%[a-zA-Z0-9_]+]]{{.*}} = memref.extract_strided_metadata [[ALLOC5]]
// CHECK-NEXT: bufferization.dealloc ([[BASE]] : {{.*}}) if ([[COND3]])
diff --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir
index 42d370a..fd771b6 100644
--- a/mlir/test/Dialect/LLVMIR/types.mlir
+++ b/mlir/test/Dialect/LLVMIR/types.mlir
@@ -91,6 +91,10 @@ func.func @array() {
"some.op"() : () -> !llvm.array<10 x ptr<4>>
// CHECK: !llvm.array<10 x array<4 x f32>>
"some.op"() : () -> !llvm.array<10 x array<4 x f32>>
+ // CHECK: !llvm.array<10 x array<4 x vector<8xf32>>>
+ "some.op"() : () -> !llvm.array<10 x array<4 x vector<8xf32>>>
+ // CHECK: !llvm.array<10 x array<4 x vector<[8]xf32>>>
+ "some.op"() : () -> !llvm.array<10 x array<4 x vector<[8]xf32>>>
return
}
diff --git a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
index d1a8922..50a2d6b 100644
--- a/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_entry_block.mlir
@@ -15,7 +15,7 @@ func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
// CHECK-LABEL: @main
// CHECK-SAME: (%[[ARG0:.+]]: tensor<f32>) -> tensor<f32>
// CHECK: %[[EXTRACTED:.+]] = tensor.extract %[[ARG0]][] : tensor<f32>
-// CHECK: cf.br ^{{.*}}(%[[EXTRACTED]] : f32)
-// CHECK: ^{{.*}}(%[[ARG1:.+]]: f32):
-// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[ARG1]] : tensor<f32>
+// CHECK: cf.br ^{{.*}}
+// CHECK: ^{{.*}}:
+// CHECK: %[[ELEMENTS:.+]] = tensor.from_elements %[[EXTRACTED]] : tensor<f32>
// CHECK: return %[[ELEMENTS]] : tensor<f32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir
index 8d17763..c728ad2 100644
--- a/mlir/test/Dialect/Linalg/detensorize_if.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir
@@ -42,18 +42,15 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br
+// CHECK-NEXT: ^[[bb1:.*]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
+// CHECK-NEXT: ^[[bb2]]
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
// -----
@@ -106,20 +103,17 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb3(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: cf.br ^[[bb4:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb4]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<0>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br ^[[bb1:.*]]
+// CHECK-NEXT: ^[[bb1:.*]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb3
+// CHECK-NEXT: ^[[bb2]]:
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]:
+// CHECK-NEXT: cf.br ^[[bb4:.*]]
+// CHECK-NEXT: ^[[bb4]]:
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
// -----
@@ -171,16 +165,13 @@ func.func @main() -> (tensor<i32>) attributes {} {
}
// CHECK-LABEL: func @main()
-// CHECK-DAG: arith.constant 0
-// CHECK-DAG: arith.constant 10
-// CHECK: cf.br ^[[bb1:.*]](%{{.*}}: i32)
-// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32):
-// CHECK-NEXT: arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^bb2(%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
-// CHECK-NEXT: arith.addi %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.br ^[[bb3:.*]](%{{.*}} : i32)
-// CHECK-NEXT: ^[[bb3]](%{{.*}}: i32)
-// CHECK-NEXT: tensor.from_elements %{{.*}} : tensor<i32>
-// CHECK-NEXT: return %{{.*}}
+// CHECK-DAG: %[[cst:.*]] = arith.constant dense<10>
+// CHECK-DAG: arith.constant true
+// CHECK: cf.br ^[[bb1:.*]]
+// CHECK-NEXT: ^[[bb1]]:
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^bb2
+// CHECK-NEXT: ^[[bb2]]
+// CHECK-NEXT: cf.br ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb3]]
+// CHECK-NEXT: return %[[cst]]
// CHECK-NEXT: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir
index aa30900..580a97d 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir
@@ -46,11 +46,11 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-ALL: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-ALL: ^[[bb1]](%{{.*}}: i32)
// DET-ALL: arith.cmpi slt, {{.*}}
-// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-ALL: ^[[bb2]](%{{.*}}: i32)
+// DET-ALL: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-ALL: ^[[bb2]]
// DET-ALL: arith.addi {{.*}}
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-ALL: ^[[bb3]](%{{.*}}: i32)
+// DET-ALL: ^[[bb3]]:
// DET-ALL: tensor.from_elements {{.*}}
// DET-ALL: return %{{.*}} : tensor<i32>
@@ -62,10 +62,10 @@ func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attribu
// DET-CF: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// DET-CF: ^[[bb1]](%{{.*}}: i32)
// DET-CF: arith.cmpi slt, {{.*}}
-// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-CF: ^[[bb2]](%{{.*}}: i32)
+// DET-CF: cf.cond_br {{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-CF: ^[[bb2]]:
// DET-CF: arith.addi {{.*}}
// DET-CF: cf.br ^[[bb1]](%{{.*}} : i32)
-// DET-CF: ^[[bb3]](%{{.*}}: i32)
+// DET-CF: ^[[bb3]]:
// DET-CF: tensor.from_elements %{{.*}} : tensor<i32>
// DET-CF: return %{{.*}} : tensor<i32>
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
index 955c7be..414d9b9 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
@@ -74,8 +74,8 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: } -> tensor<i32>
// DET-ALL: tensor.extract %{{.*}}[] : tensor<i32>
// DET-ALL: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]](%{{.*}} : i32)
-// DET-ALL: ^[[bb2]](%{{.*}}: i32)
+// DET-ALL: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// DET-ALL: ^[[bb2]]:
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: tensor.empty() : tensor<10xi32>
// DET-ALL: linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
@@ -83,7 +83,7 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-ALL: linalg.yield %{{.*}} : i32
// DET-ALL: } -> tensor<10xi32>
// DET-ALL: cf.br ^[[bb1]](%{{.*}} : tensor<10xi32>)
-// DET-ALL: ^[[bb3]](%{{.*}}: i32)
+// DET-ALL: ^[[bb3]]
// DET-ALL: tensor.from_elements %{{.*}} : tensor<i32>
// DET-ALL: return %{{.*}} : tensor<i32>
// DET-ALL: }
@@ -95,10 +95,10 @@ func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attr
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor<i32>) {
// DET-CF: tensor.extract %{{.*}}[] : tensor<i32>
// DET-CF: cmpi slt, %{{.*}}, %{{.*}} : i32
-// DET-CF: cf.cond_br %{{.*}}, ^bb2(%{{.*}} : tensor<i32>), ^bb3(%{{.*}} : tensor<i32>)
-// DET-CF: ^bb2(%{{.*}}: tensor<i32>)
+// DET-CF: cf.cond_br %{{.*}}, ^bb2, ^bb3
+// DET-CF: ^bb2:
// DET-CF: %{{.*}} = linalg.generic {{{.*}}} ins(%{{.*}} : tensor<i32>) outs(%{{.*}} : tensor<10xi32>) {
// DET-CF: cf.br ^bb1(%{{.*}} : tensor<10xi32>)
-// DET-CF: ^bb3(%{{.*}}: tensor<i32>)
+// DET-CF: ^bb3:
// DET-CF: return %{{.*}} : tensor<i32>
// DET-CF: }
diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
index 6d8d5fe..913e782 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
@@ -49,8 +49,8 @@ func.func @main() -> () attributes {} {
// CHECK-NEXT: cf.br ^[[bb1:.*]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb1]](%{{.*}}: i32)
// CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}}
-// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]](%{{.*}} : i32), ^[[bb3:.*]]
-// CHECK-NEXT: ^[[bb2]](%{{.*}}: i32)
+// CHECK-NEXT: cf.cond_br %{{.*}}, ^[[bb2:.*]], ^[[bb3:.*]]
+// CHECK-NEXT: ^[[bb2]]
// CHECK-NEXT: %{{.*}} = arith.addi %{{.*}}, %{{.*}}
// CHECK-NEXT: cf.br ^[[bb1]](%{{.*}} : i32)
// CHECK-NEXT: ^[[bb3]]:
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
index 78cf82e..4867a41 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
@@ -36,13 +36,23 @@
// CHECK: %[[mm5:.*]] = vector.insert %[[mm4]], %[[mm3]] [1] : vector<3xf32> into vector<2x3xf32>
// CHECK: %[[mm6:.*]] = arith.addf %[[C]], %[[mm5]] : vector<2x3xf32>
func.func @matmul(%arg0: vector<2x4xf32>,
- %arg1: vector<4x3xf32>,
- %arg2: vector<2x3xf32>) -> vector<2x3xf32> {
+ %arg1: vector<4x3xf32>,
+ %arg2: vector<2x3xf32>) -> vector<2x3xf32> {
%0 = vector.contract #matmat_trait %arg0, %arg1, %arg2
: vector<2x4xf32>, vector<4x3xf32> into vector<2x3xf32>
return %0 : vector<2x3xf32>
}
+// CHECK-LABEL: func @matmul_scalable
+// CHECK-NOT: vector.matrix_multiply
+func.func @matmul_scalable(%arg0: vector<2x4xf32>,
+ %arg1: vector<4x[3]xf32>,
+ %arg2: vector<2x[3]xf32>) -> vector<2x[3]xf32> {
+ %0 = vector.contract #matmat_trait %arg0, %arg1, %arg2
+ : vector<2x4xf32>, vector<4x[3]xf32> into vector<2x[3]xf32>
+ return %0 : vector<2x[3]xf32>
+}
+
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
%f = transform.structured.match ops{["func.func"]} in %module_op
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul-mixed-types.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul-mixed-types.mlir
index aabd9d2..5784ecb 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul-mixed-types.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul-mixed-types.mlir
@@ -1,11 +1,7 @@
// RUN: mlir-opt %s \
// RUN: -transform-interpreter -test-transform-dialect-erase-schedule \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" -canonicalize \
-// RUN: -arm-sme-vector-legalization -canonicalize -cse \
-// RUN: -convert-vector-to-arm-sme -arm-sme-outer-product-fusion \
-// RUN: -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za if-required-by-ops" \
-// RUN: -convert-vector-to-scf=full-unroll -convert-arm-sme-to-llvm \
+// RUN: -test-lower-to-arm-sme -convert-vector-to-llvm="enable-arm-sve" \
// RUN: -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Memref/cast-runtime-verification.mlir b/mlir/test/Integration/Dialect/MemRef/cast-runtime-verification.mlir
index 52b8c16..52b8c16 100644
--- a/mlir/test/Integration/Dialect/Memref/cast-runtime-verification.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/cast-runtime-verification.mlir
diff --git a/mlir/test/Integration/Dialect/Memref/load-runtime-verification.mlir b/mlir/test/Integration/Dialect/MemRef/load-runtime-verification.mlir
index 169dfd7..169dfd7 100644
--- a/mlir/test/Integration/Dialect/Memref/load-runtime-verification.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/load-runtime-verification.mlir
diff --git a/mlir/test/Integration/Dialect/Memref/memref_abi.c b/mlir/test/Integration/Dialect/MemRef/memref_abi.c
index 8862727..8862727 100644
--- a/mlir/test/Integration/Dialect/Memref/memref_abi.c
+++ b/mlir/test/Integration/Dialect/MemRef/memref_abi.c
diff --git a/mlir/test/Integration/Dialect/Memref/print-memref.mlir b/mlir/test/Integration/Dialect/MemRef/print-memref.mlir
index f59e220d..f59e220d 100644
--- a/mlir/test/Integration/Dialect/Memref/print-memref.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/print-memref.mlir
diff --git a/mlir/test/Integration/Dialect/Memref/reinterpret-cast-runtime-verification.mlir b/mlir/test/Integration/Dialect/MemRef/reinterpret-cast-runtime-verification.mlir
index 2239ba5..2239ba5 100644
--- a/mlir/test/Integration/Dialect/Memref/reinterpret-cast-runtime-verification.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/reinterpret-cast-runtime-verification.mlir
diff --git a/mlir/test/Integration/Dialect/Memref/subview-runtime-verification.mlir b/mlir/test/Integration/Dialect/MemRef/subview-runtime-verification.mlir
index 3ccf8b1..3ccf8b1 100644
--- a/mlir/test/Integration/Dialect/Memref/subview-runtime-verification.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/subview-runtime-verification.mlir
diff --git a/mlir/test/Integration/Dialect/Memref/verify-memref.mlir b/mlir/test/Integration/Dialect/MemRef/verify-memref.mlir
index 431ae0a..431ae0a 100644
--- a/mlir/test/Integration/Dialect/Memref/verify-memref.mlir
+++ b/mlir/test/Integration/Dialect/MemRef/verify-memref.mlir
diff --git a/mlir/test/Target/LLVMIR/Import/global-variables.ll b/mlir/test/Target/LLVMIR/Import/global-variables.ll
index 902f77b..cac74114 100644
--- a/mlir/test/Target/LLVMIR/Import/global-variables.ll
+++ b/mlir/test/Target/LLVMIR/Import/global-variables.ll
@@ -280,7 +280,24 @@ define void @bar() {
; CHECK-DAG: #[[GLOBAL_VAR:.*]] = #llvm.di_global_variable<file = #[[FILE]], line = 268, type = #[[COMPOSITE_TYPE]], isLocalToUnit = true, isDefined = true>
; CHECK-DAG: #[[GLOBAL_VAR_EXPR:.*]] = #llvm.di_global_variable_expression<var = #[[GLOBAL_VAR]], expr = <>>
-; CHECK-DAG: llvm.mlir.global external constant @".str.1"() {addr_space = 0 : i32, dbg_expr = #[[GLOBAL_VAR_EXPR]]}
+; CHECK: llvm.mlir.global private unnamed_addr constant @mlir.llvm.nameless_global_0("0\00")
+; We skip over @mlir.llvm.nameless_global.1 and 2 because they exist
+; CHECK: llvm.mlir.global private unnamed_addr constant @mlir.llvm.nameless_global_3("1\00")
+;
+; CHECK: llvm.mlir.global internal constant @zero() {addr_space = 0 : i32, dso_local} : !llvm.ptr {
+; CHECK: llvm.mlir.addressof @mlir.llvm.nameless_global_0 : !llvm.ptr
+; CHECK: llvm.mlir.global internal constant @one() {addr_space = 0 : i32, dso_local} : !llvm.ptr {
+; CHECK: llvm.mlir.addressof @mlir.llvm.nameless_global_3 : !llvm.ptr
+
+; CHECK: llvm.mlir.global external constant @".str.1"() {addr_space = 0 : i32, dbg_expr = #[[GLOBAL_VAR_EXPR]]}
+
+@0 = private unnamed_addr constant [2 x i8] c"0\00"
+@1 = private unnamed_addr constant [2 x i8] c"1\00"
+@zero = internal constant ptr @0
+@one = internal constant ptr @1
+
+@"mlir.llvm.nameless_global_1" = external constant [10 x i8], !dbg !0
+declare void @"mlir.llvm.nameless_global_2"()
@.str.1 = external constant [10 x i8], !dbg !0
diff --git a/mlir/test/Target/LLVMIR/llvmir-types.mlir b/mlir/test/Target/LLVMIR/llvmir-types.mlir
index 3e53321..6e54bb0 100644
--- a/mlir/test/Target/LLVMIR/llvmir-types.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-types.mlir
@@ -99,6 +99,10 @@ llvm.func @return_a8_float() -> !llvm.array<8 x f32>
llvm.func @return_a10_p_4() -> !llvm.array<10 x ptr<4>>
// CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x f32>>
+// CHECK: declare [10 x [4 x <4 x float>]] @return_a10_a4_v4_float()
+llvm.func @return_a10_a4_v4_float() -> !llvm.array<10 x array<4 x vector<4xf32>>>
+// CHECK: declare [10 x [4 x <vscale x 4 x float>]] @return_a10_a4_sv4_float()
+llvm.func @return_a10_a4_sv4_float() -> !llvm.array<10 x array<4 x vector<[4]xf32>>>
//
// Literal structures.
diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir
index 3b8b1fc..92cfde8 100644
--- a/mlir/test/Transforms/canonicalize-block-merge.mlir
+++ b/mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -87,7 +87,7 @@ func.func @mismatch_operands_matching_arguments(%cond : i1, %arg0 : i32, %arg1 :
// CHECK-LABEL: func @mismatch_argument_uses(
func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32, i32) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: return {{.*}}, {{.*}}
cf.cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32)
@@ -101,7 +101,7 @@ func.func @mismatch_argument_uses(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32,
// CHECK-LABEL: func @mismatch_argument_types(
func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg1 : i16)
@@ -115,7 +115,7 @@ func.func @mismatch_argument_types(%cond : i1, %arg0 : i32, %arg1 : i16) {
// CHECK-LABEL: func @mismatch_argument_count(
func.func @mismatch_argument_count(%cond : i1, %arg0 : i32) {
- // CHECK: cf.cond_br %{{.*}}, ^bb1(%{{.*}}), ^bb2
+ // CHECK: cf.cond_br %{{.*}}, ^bb1, ^bb2
cf.cond_br %cond, ^bb1(%arg0 : i32), ^bb2
diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir
index ac034d5..8463194 100644
--- a/mlir/test/Transforms/canonicalize-dce.mlir
+++ b/mlir/test/Transforms/canonicalize-dce.mlir
@@ -137,10 +137,10 @@ func.func @f(%arg0: f32) {
// Test case: Test the mechanics of deleting multiple block arguments.
// CHECK: func @f(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>, %arg2: tensor<3xf32>, %arg3: tensor<4xf32>, %arg4: tensor<5xf32>)
-// CHECK-NEXT: "test.br"(%arg1, %arg3)[^bb1] : (tensor<2xf32>, tensor<4xf32>)
-// CHECK-NEXT: ^bb1([[VAL0:%.+]]: tensor<2xf32>, [[VAL1:%.+]]: tensor<4xf32>):
-// CHECK-NEXT: "foo.print"([[VAL0]])
-// CHECK-NEXT: "foo.print"([[VAL1]])
+// CHECK-NEXT: "test.br"()[^bb1]
+// CHECK-NEXT: ^bb1:
+// CHECK-NEXT: "foo.print"(%arg1)
+// CHECK-NEXT: "foo.print"(%arg3)
// CHECK-NEXT: return
diff --git a/mlir/test/Transforms/make-isolated-from-above.mlir b/mlir/test/Transforms/make-isolated-from-above.mlir
index 58f6cfb..a9d4325 100644
--- a/mlir/test/Transforms/make-isolated-from-above.mlir
+++ b/mlir/test/Transforms/make-isolated-from-above.mlir
@@ -78,9 +78,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
// CHECK: test.isolated_one_region_op %[[ARG2]], %[[C0]], %[[C1]], %[[D0]], %[[D1]]
// CHECK-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index, %[[B3:[a-zA-Z0-9]+]]: index, %[[B4:[a-zA-Z0-9]+]]: index)
-// CHECK-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CHECK: ^bb1(%[[B5:.+]]: index)
-// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B5]])
+// CHECK-NEXT: cf.br ^bb1
+// CHECK: ^bb1:
+// CHECK: "foo.yield"(%[[B1]], %[[B2]], %[[B3]], %[[B4]], %[[B0]])
// CLONE1-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE1-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -95,9 +95,9 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE1-NEXT: ^bb0(%[[B0:[a-zA-Z0-9]+]]: index, %[[B1:[a-zA-Z0-9]+]]: index, %[[B2:[a-zA-Z0-9]+]]: index)
// CLONE1-DAG: %[[C0_0:.+]] = arith.constant 0 : index
// CLONE1-DAG: %[[C1_0:.+]] = arith.constant 1 : index
-// CLONE1-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CLONE1: ^bb1(%[[B3:.+]]: index)
-// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B3]])
+// CLONE1-NEXT: cf.br ^bb1
+// CLONE1: ^bb1:
+// CLONE1: "foo.yield"(%[[C0_0]], %[[C1_0]], %[[B1]], %[[B2]], %[[B0]])
// CLONE2-LABEL: func @make_isolated_from_above_multiple_blocks(
// CLONE2-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
@@ -110,6 +110,6 @@ func.func @make_isolated_from_above_multiple_blocks(%arg0 : index, %arg1 : index
// CLONE2-DAG: %[[EMPTY:.+]] = tensor.empty(%[[B1]], %[[B2]])
// CLONE2-DAG: %[[D0:.+]] = tensor.dim %[[EMPTY]], %[[C0]]
// CLONE2-DAG: %[[D1:.+]] = tensor.dim %[[EMPTY]], %[[C1]]
-// CLONE2-NEXT: cf.br ^bb1(%[[B0]] : index)
-// CLONE2: ^bb1(%[[B3:.+]]: index)
-// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B3]])
+// CLONE2-NEXT: cf.br ^bb1
+// CLONE2: ^bb1:
+// CLONE2: "foo.yield"(%[[C0]], %[[C1]], %[[D0]], %[[D1]], %[[B0]])
diff --git a/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir b/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
new file mode 100644
index 0000000..84df836
--- /dev/null
+++ b/mlir/test/Transforms/test-canonicalize-merge-large-blocks.mlir
@@ -0,0 +1,192 @@
+ // RUN: mlir-opt -pass-pipeline='builtin.module(llvm.func(canonicalize{region-simplify=aggressive}))' %s | FileCheck %s
+
+llvm.func @foo(%arg0: i64)
+
+llvm.func @rand() -> i1
+
+// CHECK-LABEL: func @large_merge_block(
+llvm.func @large_merge_block(%arg0: i64) {
+ // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+ // CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : i64) : i64
+ // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i64) : i64
+
+ // CHECK: llvm.cond_br %5, ^bb1(%[[C1]], %[[C3]], %[[C4]], %[[C2]] : i64, i64, i64, i64), ^bb1(%[[C4]], %[[C2]], %[[C1]], %[[C3]] : i64, i64, i64, i64)
+ // CHECK: ^bb{{.*}}(%[[arg0:.*]]: i64, %[[arg1:.*]]: i64, %[[arg2:.*]]: i64, %[[arg3:.*]]: i64):
+ // CHECK: llvm.cond_br %{{.*}}, ^bb2(%[[arg0]] : i64), ^bb2(%[[arg3]] : i64)
+ // CHECK: ^bb{{.*}}(%11: i64):
+ // CHECK: llvm.br ^bb{{.*}}
+ // CHECK: ^bb{{.*}}:
+ // CHECK: llvm.call
+ // CHECK: llvm.cond_br {{.*}}, ^bb{{.*}}(%[[arg1]] : i64), ^bb{{.*}}(%[[arg2]] : i64)
+ // CHECK: ^bb{{.*}}:
+ // CHECK: llvm.call
+ // CHECK llvm.br ^bb{{.*}}
+
+ %0 = llvm.mlir.constant(0 : i64) : i64
+ %1 = llvm.mlir.constant(1 : i64) : i64
+ %2 = llvm.mlir.constant(2 : i64) : i64
+ %3 = llvm.mlir.constant(3 : i64) : i64
+ %4 = llvm.mlir.constant(4 : i64) : i64
+ %10 = llvm.icmp "eq" %arg0, %0 : i64
+ llvm.cond_br %10, ^bb1, ^bb14
+^bb1: // pred: ^bb0
+ %11 = llvm.call @rand() : () -> i1
+ llvm.cond_br %11, ^bb2, ^bb3
+^bb2: // pred: ^bb1
+ llvm.call @foo(%1) : (i64) -> ()
+ llvm.br ^bb4
+^bb3: // pred: ^bb1
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.br ^bb4
+^bb4: // 2 preds: ^bb2, ^bb3
+ %14 = llvm.call @rand() : () -> i1
+ llvm.cond_br %14, ^bb5, ^bb6
+^bb5: // pred: ^bb4
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb13
+^bb6: // pred: ^bb4
+ llvm.call @foo(%4) : (i64) -> ()
+ llvm.br ^bb13
+^bb13: // 2 preds: ^bb11, ^bb12
+ llvm.br ^bb27
+^bb14: // pred: ^bb0
+ %23 = llvm.call @rand() : () -> i1
+ llvm.cond_br %23, ^bb15, ^bb16
+^bb15: // pred: ^bb14
+ llvm.call @foo(%4) : (i64) -> ()
+ llvm.br ^bb17
+^bb16: // pred: ^bb14
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb17
+^bb17: // 2 preds: ^bb15, ^bb16
+ %26 = llvm.call @rand() : () -> i1
+ llvm.cond_br %26, ^bb18, ^bb19
+^bb18: // pred: ^bb17
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.br ^bb26
+^bb19: // pred: ^bb17
+ llvm.call @foo(%1) : (i64) -> ()
+ llvm.br ^bb26
+^bb26: // 2 preds: ^bb24, ^bb25
+ llvm.br ^bb27
+^bb27: // 2 preds: ^bb13, ^bb26
+ llvm.return
+}
+
+llvm.func @redundant_args0(%cond : i1) {
+ %0 = llvm.mlir.constant(0 : i64) : i64
+ %2 = llvm.mlir.constant(1 : i64) : i64
+ %3 = llvm.mlir.constant(2 : i64) : i64
+ // CHECK %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+
+ llvm.cond_br %cond, ^bb1, ^bb2
+
+ // CHECK: llvm.cond_br %{{.*}}, ^bb{{.*}}(%[[C0]], %[[C0]] : i64, i64), ^bb{{.*}}(%[[C1]], %[[C2]] : i64, i64)
+ // CHECK: ^bb{{.*}}(%{{.*}}: i64, %{{.*}}: i64)
+^bb1:
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.br ^bb3
+^bb2:
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb3
+^bb3:
+ llvm.return
+}
+
+llvm.func @redundant_args1(%cond : i1) {
+ %0 = llvm.mlir.constant(0 : i64) : i64
+ %2 = llvm.mlir.constant(1 : i64) : i64
+ %3 = llvm.mlir.constant(2 : i64) : i64
+ // CHECK %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+
+ llvm.cond_br %cond, ^bb1, ^bb2
+
+ // CHECK: llvm.cond_br %{{.*}}, ^bb{{.*}}(%[[C1]], %[[C2]] : i64, i64), ^bb{{.*}}(%[[C0]], %[[C0]] : i64, i64)
+ // CHECK: ^bb{{.*}}(%{{.*}}: i64, %{{.*}}: i64)
+^bb1:
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb3
+^bb2:
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.br ^bb3
+^bb3:
+ llvm.return
+}
+
+llvm.func @redundant_args_complex(%cond : i1) {
+ %0 = llvm.mlir.constant(0 : i64) : i64
+ %1 = llvm.mlir.constant(1 : i64) : i64
+ %2 = llvm.mlir.constant(2 : i64) : i64
+ %3 = llvm.mlir.constant(3 : i64) : i64
+ // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+ // CHECK: %[[C3:.*]] = llvm.mlir.constant(3 : i64) : i64
+
+ llvm.cond_br %cond, ^bb1, ^bb2
+
+ // CHECK: llvm.cond_br %{{.*}}, ^bb{{.*}}(%[[C2]], %[[C1]], %[[C3]] : i64, i64, i64), ^bb{{.*}}(%[[C0]], %[[C3]], %[[C2]] : i64, i64, i64)
+ // CHECK: ^bb{{.*}}(%[[arg0:.*]]: i64, %[[arg1:.*]]: i64, %[[arg2:.*]]: i64):
+ // CHECK: llvm.call @foo(%[[arg0]])
+ // CHECK: llvm.call @foo(%[[arg0]])
+ // CHECK: llvm.call @foo(%[[arg1]])
+ // CHECK: llvm.call @foo(%[[C2]])
+ // CHECK: llvm.call @foo(%[[arg2]])
+
+^bb1:
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%1) : (i64) -> ()
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.br ^bb3
+^bb2:
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.call @foo(%0) : (i64) -> ()
+ llvm.call @foo(%3) : (i64) -> ()
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.call @foo(%2) : (i64) -> ()
+ llvm.br ^bb3
+^bb3:
+ llvm.return
+}
+
+llvm.func @blocks_with_args() {
+ %0 = llvm.mlir.zero : !llvm.ptr
+ %1 = llvm.call @rand() : () -> i1
+ // CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : i64)
+ // CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i64)
+ // CHECK: %[[cond:.*]] = llvm.call @rand
+ %3 = llvm.mlir.constant(0) : i64
+ %4 = llvm.mlir.constant(1) : i64
+ // CHECK: llvm.cond_br %[[cond]], ^bb1(%[[c1]] : i64), ^bb1(%[[c0]] : i64)
+ // CHECK: ^bb1(%{{.*}}: i64):
+ // CHECK ^bb2:
+ // CHECK ^bb3:
+ // CHECK llvm.return
+ llvm.cond_br %1, ^bb7(%0 : !llvm.ptr), ^bb1(%0 : !llvm.ptr)
+^bb1(%5: !llvm.ptr):
+ llvm.store %5, %0 : !llvm.ptr, !llvm.ptr
+ llvm.cond_br %1, ^bb2(%3 : i64), ^bb4(%3 : i64)
+^bb7(%6: !llvm.ptr):
+ llvm.store %6, %0 : !llvm.ptr, !llvm.ptr
+ llvm.cond_br %1, ^bb2(%4 : i64), ^bb4(%4 : i64)
+^bb2(%7: i64):
+ llvm.call @foo(%7) : (i64) -> ()
+ llvm.br ^bb8
+^bb4(%8: i64):
+ llvm.call @foo(%8) : (i64) -> ()
+ llvm.br ^bb8
+^bb8:
+ llvm.return
+}
diff --git a/mlir/tools/mlir-cpu-runner/CMakeLists.txt b/mlir/tools/mlir-cpu-runner/CMakeLists.txt
index 1766b28..ae6dbce 100644
--- a/mlir/tools/mlir-cpu-runner/CMakeLists.txt
+++ b/mlir/tools/mlir-cpu-runner/CMakeLists.txt
@@ -7,6 +7,8 @@ set(LLVM_LINK_COMPONENTS
add_mlir_tool(mlir-cpu-runner
mlir-cpu-runner.cpp
+
+ EXPORT_SYMBOLS
)
llvm_update_compile_flags(mlir-cpu-runner)
target_link_libraries(mlir-cpu-runner PRIVATE
@@ -22,5 +24,3 @@ target_link_libraries(mlir-cpu-runner PRIVATE
MLIRTargetLLVMIRExport
MLIRSupport
)
-
-export_executable_symbols(mlir-cpu-runner)
diff --git a/mlir/tools/mlir-opt/CMakeLists.txt b/mlir/tools/mlir-opt/CMakeLists.txt
index 8b79de5..1209c53 100644
--- a/mlir/tools/mlir-opt/CMakeLists.txt
+++ b/mlir/tools/mlir-opt/CMakeLists.txt
@@ -102,9 +102,9 @@ add_mlir_tool(mlir-opt
DEPENDS
${LIBS}
SUPPORT_PLUGINS
+ EXPORT_SYMBOLS_FOR_PLUGINS
)
target_link_libraries(mlir-opt PRIVATE ${LIBS})
llvm_update_compile_flags(mlir-opt)
mlir_check_all_link_libraries(mlir-opt)
-export_executable_symbols_for_plugins(mlir-opt)
diff --git a/openmp/runtime/src/kmp_os.h b/openmp/runtime/src/kmp_os.h
index 24b40ed..2252f5e 100644
--- a/openmp/runtime/src/kmp_os.h
+++ b/openmp/runtime/src/kmp_os.h
@@ -77,7 +77,7 @@
#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
KMP_OS_DRAGONFLY || KMP_OS_AIX) && \
- !KMP_OS_WASI
+ !KMP_OS_WASI && !KMP_OS_EMSCRIPTEN
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
@@ -1293,7 +1293,7 @@ bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
-#elif KMP_OS_WASI
+#elif KMP_OS_WASI || KMP_OS_EMSCRIPTEN
#define KMP_DLSYM(name) nullptr
#define KMP_DLSYM_NEXT(name) nullptr
#else
diff --git a/openmp/runtime/src/kmp_platform.h b/openmp/runtime/src/kmp_platform.h
index 200fdf6..9c22151 100644
--- a/openmp/runtime/src/kmp_platform.h
+++ b/openmp/runtime/src/kmp_platform.h
@@ -25,6 +25,7 @@
#define KMP_OS_HURD 0
#define KMP_OS_SOLARIS 0
#define KMP_OS_WASI 0
+#define KMP_OS_EMSCRIPTEN 0
#define KMP_OS_UNIX 0 /* disjunction of KMP_OS_LINUX, KMP_OS_DARWIN etc. */
#ifdef _WIN32
@@ -44,6 +45,11 @@
#elif (defined __linux__)
#undef KMP_OS_LINUX
#define KMP_OS_LINUX 1
+#elif defined(__EMSCRIPTEN__)
+#undef KMP_OS_LINUX
+#undef KMP_OS_EMSCRIPTEN
+#define KMP_OS_LINUX 1
+#define KMP_OS_EMSCRIPTEN 1
#else
#endif
@@ -77,7 +83,7 @@
#define KMP_OS_SOLARIS 1
#endif
-#if (defined __wasi__) || (defined __EMSCRIPTEN__)
+#if (defined __wasi__)
#undef KMP_OS_WASI
#define KMP_OS_WASI 1
#endif
diff --git a/openmp/runtime/src/z_Linux_asm.S b/openmp/runtime/src/z_Linux_asm.S
index 5b614e2..f119f64 100644
--- a/openmp/runtime/src/z_Linux_asm.S
+++ b/openmp/runtime/src/z_Linux_asm.S
@@ -2452,23 +2452,3 @@ KMP_PREFIX_UNDERSCORE(__kmp_unnamed_critical_addr):
.section .note.GNU-stack,"",@progbits
# endif
#endif
-
-#if KMP_ARCH_WASM
-.data
-.global .gomp_critical_user_
-.global .gomp_critical_user_.var
-.global .gomp_critical_user_.reduction.var
-.global __kmp_unnamed_critical_addr
-.gomp_critical_user_:
-.zero 4
-.size .gomp_critical_user_, 4
-.gomp_critical_user_.var:
-.zero 4
-.size .gomp_critical_user_.var, 4
-.gomp_critical_user_.reduction.var:
-.zero 4
-.size .gomp_critical_user_.reduction.var, 4
-__kmp_unnamed_critical_addr:
- .4byte .gomp_critical_user_
- .size __kmp_unnamed_critical_addr, 4
-#endif
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index c58236a..1bf6cdb 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -2654,7 +2654,6 @@ cc_library(
":CodeGen",
":Core",
":Coroutines",
- ":Demangle",
":HipStdPar",
":IPO",
":IRPrinter",