aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVitaly Buka <vitalybuka@google.com>2024-10-18 11:48:47 -0700
committerVitaly Buka <vitalybuka@google.com>2024-10-18 11:48:47 -0700
commitead6b282402ff27367e744e3a92df12b92ef45c1 (patch)
tree899548e0d7e0a4593dd2e175f143ab277e9773b6
parenta9aec0b7845ae812186ee5f01fdba322cd025322 (diff)
parentd60fdc1ca31f21e27450f3902710ab37907af84e (diff)
downloadllvm-users/vitalybuka/spr/main.lsan-process-non-suspended-threads.zip
llvm-users/vitalybuka/spr/main.lsan-process-non-suspended-threads.tar.gz
llvm-users/vitalybuka/spr/main.lsan-process-non-suspended-threads.tar.bz2
[𝘀𝗽𝗿] changes introduced through rebaseusers/vitalybuka/spr/main.lsan-process-non-suspended-threads
Created using spr 1.3.4 [skip ci]
-rw-r--r--bolt/include/bolt/Core/DIEBuilder.h2
-rw-r--r--bolt/lib/Core/BinaryContext.cpp4
-rw-r--r--bolt/lib/Passes/VeneerElimination.cpp6
-rw-r--r--bolt/lib/Rewrite/DWARFRewriter.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseStartsEndsWithCheck.cpp82
-rw-r--r--clang-tools-extra/clangd/test/log.test2
-rw-r--r--clang-tools-extra/test/clang-query/invalid-command-line.cpp2
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/modernize/use-starts-ends-with.cpp13
-rw-r--r--clang-tools-extra/test/clang-tidy/infrastructure/invalid-command-line.cpp2
-rw-r--r--clang/docs/ClangFormatStyleOptions.rst25
-rw-r--r--clang/docs/ReleaseNotes.rst10
-rw-r--r--clang/include/clang-c/Index.h2
-rw-r--r--clang/include/clang/Basic/AArch64SVEACLETypes.def6
-rw-r--r--clang/include/clang/Basic/Builtins.td1
-rw-r--r--clang/include/clang/Basic/StackExhaustionHandler.h45
-rw-r--r--clang/include/clang/Driver/Options.td11
-rw-r--r--clang/include/clang/Format/Format.h25
-rw-r--r--clang/include/clang/Sema/Sema.h6
-rw-r--r--clang/include/clang/Serialization/ASTBitCodes.h2
-rw-r--r--clang/include/clang/Serialization/ASTReader.h6
-rw-r--r--clang/lib/AST/ASTImporter.cpp26
-rw-r--r--clang/lib/AST/ByteCode/Compiler.cpp22
-rw-r--r--clang/lib/AST/ByteCode/Context.cpp3
-rw-r--r--clang/lib/AST/ByteCode/EvalEmitter.cpp17
-rw-r--r--clang/lib/AST/ByteCode/Interp.h6
-rw-r--r--clang/lib/AST/ByteCode/InterpBuiltin.cpp21
-rw-r--r--clang/lib/AST/ByteCode/Opcodes.td1
-rw-r--r--clang/lib/AST/Decl.cpp10
-rw-r--r--clang/lib/AST/Expr.cpp2
-rw-r--r--clang/lib/AST/ExprConstant.cpp14
-rw-r--r--clang/lib/Basic/CMakeLists.txt1
-rw-r--r--clang/lib/Basic/StackExhaustionHandler.cpp35
-rw-r--r--clang/lib/Basic/Targets/RISCV.cpp10
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp4
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp12
-rw-r--r--clang/lib/CodeGen/CodeGenModule.h6
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp37
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp31
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp14
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.h3
-rw-r--r--clang/lib/Driver/ToolChains/Flang.cpp2
-rw-r--r--clang/lib/Format/Format.cpp3
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp4
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp5
-rw-r--r--clang/lib/Sema/Sema.cpp12
-rw-r--r--clang/lib/Sema/SemaFunctionEffects.cpp4
-rw-r--r--clang/lib/Sema/SemaHLSL.cpp4
-rw-r--r--clang/lib/Sema/SemaRISCV.cpp13
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp3
-rw-r--r--clang/lib/Serialization/ASTReader.cpp21
-rw-r--r--clang/lib/Serialization/ASTReaderDecl.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp85
-rw-r--r--clang/test/AST/ByteCode/builtin-functions.cpp14
-rw-r--r--clang/test/AST/ByteCode/new-delete.cpp22
-rw-r--r--clang/test/AST/ByteCode/placement-new.cpp24
-rw-r--r--clang/test/Analysis/infeasible-sink.c29
-rw-r--r--clang/test/Analysis/unary-sym-expr.c33
-rw-r--r--clang/test/CodeGen/RISCV/riscv-inline-asm.c40
-rw-r--r--clang/test/CodeGen/math-libcalls-tbaa-indirect-args.c18
-rw-r--r--clang/test/CodeGen/stack-protector-guard.c16
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/inlined-key-function.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/parent-and-child-in-comdats.cpp4
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/parent-vtable-in-comdat.cpp4
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp2
-rw-r--r--clang/test/CodeGenCXX/RelativeVTablesABI/type-info.cpp4
-rw-r--r--clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp86
-rw-r--r--clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp70
-rw-r--r--clang/test/CodeGenCXX/armv7k.cpp6
-rw-r--r--clang/test/CodeGenCXX/builtins.cpp14
-rw-r--r--clang/test/CodeGenCXX/dynamic-cast-address-space.cpp4
-rw-r--r--clang/test/CodeGenCXX/exceptions-no-rtti.cpp2
-rw-r--r--clang/test/CodeGenCXX/implicit-record-visibility.cpp2
-rw-r--r--clang/test/CodeGenCXX/mdefault-visibility-export-mapping-rtti.cpp480
-rw-r--r--clang/test/CodeGenCXX/modules-vtable.cppm12
-rw-r--r--clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp4
-rw-r--r--clang/test/CodeGenCXX/ptrauth-type-info-vtable.cpp7
-rw-r--r--clang/test/CodeGenCXX/ptrauth-vtable-virtual-inheritance-thunk.cpp26
-rw-r--r--clang/test/CodeGenCXX/rtti-linkage.cpp64
-rw-r--r--clang/test/CodeGenCXX/rtti-visibility.cpp6
-rw-r--r--clang/test/CodeGenCXX/symbol-partition.cpp2
-rw-r--r--clang/test/CodeGenCXX/type_visibility.cpp36
-rw-r--r--clang/test/CodeGenCXX/typeinfo-with-address-space.cpp4
-rw-r--r--clang/test/CodeGenCXX/visibility-ms-compat.cpp12
-rw-r--r--clang/test/CodeGenCXX/vtable-align-address-space.cpp2
-rw-r--r--clang/test/CodeGenCXX/vtable-align.cpp4
-rw-r--r--clang/test/CodeGenCXX/vtable-available-externally.cpp2
-rw-r--r--clang/test/CodeGenCXX/vtable-key-function-arm.cpp24
-rw-r--r--clang/test/CodeGenCXX/vtable-key-function-ios.cpp16
-rw-r--r--clang/test/CodeGenCXX/vtable-key-function-win-comdat.cpp6
-rw-r--r--clang/test/CodeGenCXX/weak-extern-typeinfo.cpp14
-rw-r--r--clang/test/CodeGenCXX/windows-itanium-type-info.cpp2
-rw-r--r--clang/test/CodeGenObjCXX/rtti.mm9
-rw-r--r--clang/test/Driver/stack-protector-guard.c57
-rw-r--r--clang/test/Modules/no-external-type-id.cppm2
-rw-r--r--clang/test/Modules/pr97313.cppm6
-rw-r--r--clang/test/Sema/constant-builtins-2.c13
-rw-r--r--clang/test/Sema/constexpr.c17
-rw-r--r--clang/test/SemaCXX/cxx2b-deducing-this.cpp17
-rw-r--r--clang/test/SemaCXX/typeid-ref.cpp2
-rw-r--r--clang/unittests/AST/ASTImporterTest.cpp28
-rw-r--r--clang/unittests/Basic/DiagnosticTest.cpp5
-rw-r--r--clang/unittests/Format/ConfigParseTest.cpp1
-rw-r--r--clang/unittests/Format/FormatTest.cpp77
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp6
-rw-r--r--clang/utils/TableGen/ClangOptionDocEmitter.cpp4
-rw-r--r--clang/utils/TableGen/RISCVVEmitter.cpp4
-rw-r--r--clang/utils/TableGen/SveEmitter.cpp4
-rw-r--r--compiler-rt/lib/interception/interception_win.cpp1
-rw-r--r--compiler-rt/test/lsan/TestCases/print_threads.c3
-rw-r--r--flang/docs/OptionComparison.md2
-rw-r--r--flang/include/flang/Common/LangOptions.def2
-rw-r--r--flang/include/flang/Common/LangOptions.h8
-rw-r--r--flang/include/flang/Lower/LoweringOptions.def5
-rw-r--r--flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h2
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRTypes.td4
-rw-r--r--flang/include/flang/Parser/parse-tree.h2
-rw-r--r--flang/include/flang/Runtime/magic-numbers.h2
-rw-r--r--flang/lib/Evaluate/intrinsics.cpp2
-rw-r--r--flang/lib/Frontend/CompilerInvocation.cpp36
-rw-r--r--flang/lib/Lower/ConvertConstant.cpp7
-rw-r--r--flang/lib/Lower/ConvertExpr.cpp8
-rw-r--r--flang/lib/Lower/ConvertExprToHLFIR.cpp5
-rw-r--r--flang/lib/Lower/ConvertVariable.cpp2
-rw-r--r--flang/lib/Lower/OpenMP/Clauses.cpp23
-rw-r--r--flang/lib/Optimizer/Builder/IntrinsicCall.cpp4
-rw-r--r--flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp2
-rw-r--r--flang/lib/Optimizer/Builder/Runtime/Numeric.cpp6
-rw-r--r--flang/lib/Optimizer/Builder/Runtime/Reduction.cpp26
-rw-r--r--flang/lib/Optimizer/Builder/Runtime/Transformational.cpp8
-rw-r--r--flang/lib/Optimizer/CodeGen/CodeGen.cpp4
-rw-r--r--flang/lib/Optimizer/Dialect/FIROps.cpp6
-rw-r--r--flang/lib/Parser/parse-tree.cpp18
-rw-r--r--flang/lib/Semantics/check-omp-structure.cpp46
-rw-r--r--flang/lib/Semantics/check-omp-structure.h7
-rw-r--r--flang/test/Driver/frontend-forwarding.f902
-rw-r--r--flang/test/Driver/integer-overflow.f9010
-rw-r--r--flang/test/Integration/OpenMP/atomic-capture-complex.f9050
-rw-r--r--flang/test/Semantics/OpenMP/clause-validity01.f906
-rw-r--r--flang/test/Semantics/OpenMP/do-collapse.f908
-rw-r--r--flang/test/Semantics/OpenMP/loop-association.f906
-rw-r--r--flang/test/Semantics/OpenMP/workshare02.f9018
-rw-r--r--libc/cmake/modules/LLVMLibCCompileOptionRules.cmake11
-rw-r--r--libc/config/config.json4
-rw-r--r--libc/config/gpu/entrypoints.txt1
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt1
-rw-r--r--libc/docs/configure.rst1
-rw-r--r--libc/docs/math/index.rst2
-rw-r--r--libc/include/llvm-libc-types/CMakeLists.txt8
-rw-r--r--libc/include/llvm-libc-types/cfloat128.h41
-rw-r--r--libc/include/llvm-libc-types/cfloat16.h21
-rw-r--r--libc/spec/stdc.td1
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt2
-rw-r--r--libc/src/__support/CPP/type_traits.h1
-rw-r--r--libc/src/__support/CPP/type_traits/is_complex.h15
-rw-r--r--libc/src/__support/FPUtil/ManipulationFunctions.h10
-rw-r--r--libc/src/__support/macros/properties/CMakeLists.txt10
-rw-r--r--libc/src/__support/macros/properties/complex_types.h25
-rw-r--r--libc/src/math/CMakeLists.txt1
-rw-r--r--libc/src/math/generic/CMakeLists.txt23
-rw-r--r--libc/src/math/generic/exp10f16.cpp2
-rw-r--r--libc/src/math/generic/powf.cpp6
-rw-r--r--libc/src/math/generic/tanhf16.cpp144
-rw-r--r--libc/src/math/tanhf16.h21
-rw-r--r--libc/test/UnitTest/FPMatcher.h17
-rw-r--r--libc/test/src/math/CMakeLists.txt11
-rw-r--r--libc/test/src/math/smoke/CMakeLists.txt13
-rw-r--r--libc/test/src/math/smoke/FrexpTest.h11
-rw-r--r--libc/test/src/math/smoke/powf_test.cpp3
-rw-r--r--libc/test/src/math/smoke/tanhf16_test.cpp143
-rw-r--r--libc/test/src/math/tanhf16_test.cpp40
-rw-r--r--libcxx/.clang-format1
-rw-r--r--libcxx/docs/ReleaseNotes/20.rst6
-rw-r--r--libcxx/include/__memory/addressof.h6
-rw-r--r--libcxx/include/bitset56
-rw-r--r--libcxx/include/forward_list109
-rw-r--r--libcxx/include/list191
-rw-r--r--libcxx/test/std/containers/sequences/forwardlist/types.pass.cpp18
-rw-r--r--libcxx/test/std/containers/sequences/list/types.pass.cpp18
-rw-r--r--libcxx/test/std/utilities/template.bitset/bitset.members/nonstdmem.uglified.compile.pass.cpp15
-rw-r--r--lld/MachO/ObjC.cpp99
-rw-r--r--lld/test/MachO/objc-category-merging-minimal.s13
-rw-r--r--lldb/CMakeLists.txt2
-rw-r--r--lldb/docs/use/aarch64-linux.md8
-rw-r--r--lldb/source/Commands/CommandObjectMultiword.cpp8
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp3
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/Makefile6
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/TestOdrHandlingWithDylib.py29
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/main.cpp11
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.cpp14
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.h9
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/service.cpp15
-rw-r--r--lldb/test/API/lang/cpp/odr-handling-with-dylib/service.h20
-rw-r--r--llvm/include/llvm/ADT/STLExtras.h29
-rw-r--r--llvm/include/llvm/Analysis/TargetLibraryInfo.def15
-rw-r--r--llvm/include/llvm/BinaryFormat/Minidump.h2
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h5
-rw-r--r--llvm/include/llvm/CodeGen/TargetFrameLowering.h9
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h4
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMP.td2
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td2
-rw-r--r--llvm/include/llvm/Object/OffloadBinary.h3
-rw-r--r--llvm/include/llvm/SandboxIR/Operator.h99
-rw-r--r--llvm/include/llvm/SandboxIR/Type.h6
-rw-r--r--llvm/include/llvm/SandboxIR/Value.h6
-rw-r--r--llvm/include/llvm/Support/AutoConvert.h34
-rw-r--r--llvm/include/llvm/TableGen/Error.h4
-rw-r--r--llvm/include/llvm/TableGen/Record.h526
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td14
-rw-r--r--llvm/include/llvm/Transforms/Utils/SSAUpdater.h2
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp4
-rw-r--r--llvm/lib/Analysis/LazyValueInfo.cpp14
-rw-r--r--llvm/lib/Analysis/ModuleSummaryAnalysis.cpp12
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp3
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp2
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp4
-rw-r--r--llvm/lib/CodeGen/MachineSSAUpdater.cpp2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp100
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp47
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp7
-rw-r--r--llvm/lib/Passes/PassBuilder.cpp1
-rw-r--r--llvm/lib/Passes/PassRegistry.def1
-rw-r--r--llvm/lib/TableGen/DetailedRecordsBackend.cpp2
-rw-r--r--llvm/lib/TableGen/Error.cpp4
-rw-r--r--llvm/lib/TableGen/Record.cpp873
-rw-r--r--llvm/lib/TableGen/TGLexer.h2
-rw-r--r--llvm/lib/TableGen/TGParser.cpp428
-rw-r--r--llvm/lib/TableGen/TGParser.h82
-rw-r--r--llvm/lib/Target/AArch64/AArch64Combine.td6
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.cpp6
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.h4
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp81
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.h3
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp6
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/R600FrameLowering.h5
-rw-r--r--llvm/lib/Target/AMDGPU/SIFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/SIFrameLowering.h5
-rw-r--r--llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp4
-rw-r--r--llvm/lib/Target/ARC/ARCFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/ARC/ARCFrameLowering.h5
-rw-r--r--llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp5
-rw-r--r--llvm/lib/Target/ARM/ARMCallingConv.td19
-rw-r--r--llvm/lib/Target/ARM/ARMFrameLowering.cpp153
-rw-r--r--llvm/lib/Target/ARM/ARMFrameLowering.h4
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp2
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.h2
-rw-r--r--llvm/lib/Target/ARM/ARMSubtarget.cpp7
-rw-r--r--llvm/lib/Target/ARM/ARMSubtarget.h12
-rw-r--r--llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp2
-rw-r--r--llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp8
-rw-r--r--llvm/lib/Target/AVR/AVRFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/AVR/AVRFrameLowering.h4
-rw-r--r--llvm/lib/Target/BPF/BPFFrameLowering.cpp4
-rw-r--r--llvm/lib/Target/BPF/BPFFrameLowering.h4
-rw-r--r--llvm/lib/Target/CSKY/CSKYFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/CSKY/CSKYFrameLowering.h4
-rw-r--r--llvm/lib/Target/DirectX/DirectXFrameLowering.h3
-rw-r--r--llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp5
-rw-r--r--llvm/lib/Target/Hexagon/HexagonFrameLowering.h4
-rw-r--r--llvm/lib/Target/Lanai/LanaiFrameLowering.h5
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchFrameLowering.h4
-rw-r--r--llvm/lib/Target/M68k/M68kFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/M68k/M68kFrameLowering.h13
-rw-r--r--llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp17
-rw-r--r--llvm/lib/Target/M68k/M68kInstrAtomics.td7
-rw-r--r--llvm/lib/Target/MSP430/MSP430FrameLowering.cpp2
-rw-r--r--llvm/lib/Target/MSP430/MSP430FrameLowering.h2
-rw-r--r--llvm/lib/Target/Mips/MipsFrameLowering.cpp10
-rw-r--r--llvm/lib/Target/Mips/MipsFrameLowering.h4
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp4
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXFrameLowering.h4
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp2
-rw-r--r--llvm/lib/Target/PowerPC/PPCFrameLowering.cpp6
-rw-r--r--llvm/lib/Target/PowerPC/PPCFrameLowering.h4
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp8
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.h2
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.cpp14
-rw-r--r--llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp8
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.h4
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp149
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h3
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.td19
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp45
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVFrameLowering.h3
-rw-r--r--llvm/lib/Target/Sparc/SparcFrameLowering.cpp8
-rw-r--r--llvm/lib/Target/Sparc/SparcFrameLowering.h4
-rw-r--r--llvm/lib/Target/Sparc/SparcISelLowering.cpp4
-rw-r--r--llvm/lib/Target/Sparc/SparcISelLowering.h2
-rw-r--r--llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp16
-rw-r--r--llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp2
-rw-r--r--llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZ.td6
-rw-r--r--llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp9
-rw-r--r--llvm/lib/Target/SystemZ/SystemZFrameLowering.h9
-rw-r--r--llvm/lib/Target/SystemZ/SystemZISelLowering.h4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrFormats.td26
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.td6
-rw-r--r--llvm/lib/Target/VE/VEFrameLowering.cpp8
-rw-r--r--llvm/lib/Target/VE/VEFrameLowering.h3
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h4
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td2
-rw-r--r--llvm/lib/Target/X86/X86FastISel.cpp2
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.cpp8
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.h4
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp13
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h2
-rw-r--r--llvm/lib/Target/X86/X86InstrSSE.td17
-rw-r--r--llvm/lib/Target/XCore/XCoreFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/XCore/XCoreFrameLowering.h5
-rw-r--r--llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/Xtensa/XtensaFrameLowering.h5
-rw-r--r--llvm/lib/Transforms/IPO/FunctionSpecialization.cpp15
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/BuildLibCalls.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/CtorUtils.cpp6
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.cpp14
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h3
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/fixed-vector-insert-subvector.ll18
-rw-r--r--llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir21
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-trunc.mir98
-rw-r--r--llvm/test/CodeGen/AArch64/add.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/and-mask-removal.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/andorxor.ll18
-rw-r--r--llvm/test/CodeGen/AArch64/bitcast.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/concat-vector.ll91
-rw-r--r--llvm/test/CodeGen/AArch64/fcmp.ll11
-rw-r--r--llvm/test/CodeGen/AArch64/itofp.ll5
-rw-r--r--llvm/test/CodeGen/AArch64/mul.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/naked-fn-with-frame-pointer.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll21
-rw-r--r--llvm/test/CodeGen/AArch64/qshrn.ll383
-rw-r--r--llvm/test/CodeGen/AArch64/sub.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll262
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll9
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll252
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll70
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll44
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll52
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll692
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll11
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll53
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll82
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll58
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll1132
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll42
-rw-r--r--llvm/test/CodeGen/ARM/naked-fn-with-frame-pointer.ll55
-rw-r--r--llvm/test/CodeGen/AVR/naked-fn-with-frame-pointer.ll20
-rw-r--r--llvm/test/CodeGen/BPF/naked-fn-with-frame-pointer.ll41
-rw-r--r--llvm/test/CodeGen/CSKY/naked-fn-with-frame-pointer.ll41
-rw-r--r--llvm/test/CodeGen/Hexagon/naked-fn-with-frame-pointer.ll30
-rw-r--r--llvm/test/CodeGen/Lanai/naked-fn-with-frame-pointer.ll35
-rw-r--r--llvm/test/CodeGen/LoongArch/naked-fn-with-frame-pointer.ll45
-rw-r--r--llvm/test/CodeGen/M68k/Atomics/non-ari.ll46
-rw-r--r--llvm/test/CodeGen/M68k/naked-fn-with-frame-pointer.ll26
-rw-r--r--llvm/test/CodeGen/MSP430/naked-fn-with-frame-pointer.ll27
-rw-r--r--llvm/test/CodeGen/Mips/naked-fn-with-frame-pointer.ll87
-rw-r--r--llvm/test/CodeGen/NVPTX/naked-fn-with-frame-pointer.ll73
-rw-r--r--llvm/test/CodeGen/PowerPC/naked-fn-with-frame-pointer.ll87
-rw-r--r--llvm/test/CodeGen/PowerPC/stack-guard-global.ll122
-rw-r--r--llvm/test/CodeGen/PowerPC/stack-guard-tls.ll114
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll33
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll109
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-invalid.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll41
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll157
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll89
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll158
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm.ll66
-rw-r--r--llvm/test/CodeGen/RISCV/naked-fn-with-frame-pointer.ll45
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-trampoline.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll61
-rw-r--r--llvm/test/CodeGen/SPARC/naked-fn-with-frame-pointer.ll45
-rw-r--r--llvm/test/CodeGen/SystemZ/naked-fn-with-frame-pointer.ll28
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-reduce-add-01.ll2
-rw-r--r--llvm/test/CodeGen/Thumb2/pacbti-m-frame-chain.ll150
-rw-r--r--llvm/test/CodeGen/VE/naked-fn-with-frame-pointer.ll41
-rw-r--r--llvm/test/CodeGen/WebAssembly/naked-fn-with-frame-pointer.ll37
-rw-r--r--llvm/test/CodeGen/X86/andnot-patterns.ll11
-rw-r--r--llvm/test/CodeGen/X86/avx2-arith.ll2
-rw-r--r--llvm/test/CodeGen/X86/combine-sra.ll9
-rw-r--r--llvm/test/CodeGen/X86/fma.ll136
-rw-r--r--llvm/test/CodeGen/X86/midpoint-int-vec-128.ll50
-rw-r--r--llvm/test/CodeGen/X86/midpoint-int-vec-256.ll60
-rw-r--r--llvm/test/CodeGen/X86/min-legal-vector-width.ll18
-rw-r--r--llvm/test/CodeGen/X86/naked-fn-with-frame-pointer.ll39
-rw-r--r--llvm/test/CodeGen/X86/pmul.ll62
-rw-r--r--llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll2
-rw-r--r--llvm/test/CodeGen/X86/psubus.ll81
-rw-r--r--llvm/test/CodeGen/X86/sat-add.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll6
-rw-r--r--llvm/test/CodeGen/X86/vector-trunc-packus.ll275
-rw-r--r--llvm/test/CodeGen/X86/vector-trunc-ssat.ll275
-rw-r--r--llvm/test/CodeGen/X86/vector-trunc-usat.ll279
-rw-r--r--llvm/test/CodeGen/XCore/naked-fn-with-frame-pointer.ll31
-rw-r--r--llvm/test/CodeGen/Xtensa/naked-fn-with-frame-pointer.ll31
-rw-r--r--llvm/test/TableGen/GlobalISelEmitter-implicit-defs.td12
-rw-r--r--llvm/test/ThinLTO/X86/memprof-icp.ll54
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/umax.ll168
-rw-r--r--llvm/test/Transforms/FunctionSpecialization/noinline.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/ctor-list-preserve-addrspace.ll19
-rw-r--r--llvm/test/Transforms/InferFunctionAttrs/annotate.ll9
-rw-r--r--llvm/test/Transforms/InstCombine/sink_instruction.ll119
-rw-r--r--llvm/test/Transforms/JumpThreading/thread-debug-info.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll80
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll119
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-select-intrinsics.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-unused-interleave-group.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/remarks-insert-into-small-vector.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll4
-rw-r--r--llvm/test/Transforms/SROA/fake-use-sroa.ll2
-rw-r--r--llvm/test/Transforms/Sink/invariant-load.ll67
-rw-r--r--llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml20
-rw-r--r--llvm/tools/llvm-readtapi/llvm-readtapi.cpp4
-rw-r--r--llvm/unittests/ADT/STLExtrasTest.cpp37
-rw-r--r--llvm/unittests/Analysis/TargetLibraryInfoTest.cpp3
-rw-r--r--llvm/unittests/CodeGen/MFCommon.inc4
-rw-r--r--llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp2
-rw-r--r--llvm/unittests/Object/GOFFObjectFileTest.cpp112
-rw-r--r--llvm/unittests/SandboxIR/CMakeLists.txt1
-rw-r--r--llvm/unittests/SandboxIR/OperatorTest.cpp141
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp4
-rw-r--r--llvm/utils/TableGen/AsmMatcherEmitter.cpp44
-rw-r--r--llvm/utils/TableGen/AsmWriterEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/Attributes.cpp2
-rw-r--r--llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp2
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp2
-rw-r--r--llvm/utils/TableGen/CodeGenMapTable.cpp4
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp61
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.h10
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstAlias.cpp4
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstAlias.h2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstruction.cpp22
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstruction.h4
-rw-r--r--llvm/utils/TableGen/Common/CodeGenRegisters.cpp16
-rw-r--r--llvm/utils/TableGen/Common/CodeGenSchedule.cpp9
-rw-r--r--llvm/utils/TableGen/Common/CodeGenTarget.cpp10
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp4
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp2
-rw-r--r--llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp11
-rw-r--r--llvm/utils/TableGen/CompressInstEmitter.cpp7
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherGen.cpp2
-rw-r--r--llvm/utils/TableGen/DFAEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/DXILEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/DecoderEmitter.cpp36
-rw-r--r--llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp11
-rw-r--r--llvm/utils/TableGen/InstrInfoEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/OptionParserEmitter.cpp10
-rw-r--r--llvm/utils/TableGen/RegisterInfoEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/SearchableTableEmitter.cpp9
-rw-r--r--llvm/utils/TableGen/X86FoldTablesEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/X86InstrMappingEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/X86RecognizableInstr.cpp8
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/SandboxIR/BUILD.gn1
-rw-r--r--llvm/utils/lit/tests/shtest-output-printing.py2
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp2
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp47
-rw-r--r--mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp2
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleImport.cpp2
-rw-r--r--mlir/lib/Transforms/RemoveDeadValues.cpp2
-rw-r--r--mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir27
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir23
-rw-r--r--mlir/test/Dialect/Tensor/invalid.mlir35
-rw-r--r--mlir/test/mlir-rewrite/simple.mlir3
-rw-r--r--mlir/tools/mlir-rewrite/mlir-rewrite.cpp16
-rw-r--r--mlir/tools/mlir-tblgen/BytecodeDialectGen.cpp3
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel22
493 files changed, 10402 insertions, 5425 deletions
diff --git a/bolt/include/bolt/Core/DIEBuilder.h b/bolt/include/bolt/Core/DIEBuilder.h
index e5b057e..d1acba0 100644
--- a/bolt/include/bolt/Core/DIEBuilder.h
+++ b/bolt/include/bolt/Core/DIEBuilder.h
@@ -314,7 +314,7 @@ public:
BC.errs()
<< "BOLT-ERROR: unable to find TypeUnit for Type Unit at offset 0x"
- << DU.getOffset() << "\n";
+ << Twine::utohexstr(DU.getOffset()) << "\n";
return nullptr;
}
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index 1347047..f246750 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -1294,8 +1294,8 @@ bool BinaryContext::handleAArch64Veneer(uint64_t Address, bool MatchOnly) {
Veneer->getOrCreateLocalLabel(Address);
Veneer->setMaxSize(TotalSize);
Veneer->updateState(BinaryFunction::State::Disassembled);
- LLVM_DEBUG(dbgs() << "BOLT-DEBUG: handling veneer function at 0x" << Address
- << "\n");
+ LLVM_DEBUG(dbgs() << "BOLT-DEBUG: handling veneer function at 0x"
+ << Twine::utohexstr(Address) << "\n");
return true;
};
diff --git a/bolt/lib/Passes/VeneerElimination.cpp b/bolt/lib/Passes/VeneerElimination.cpp
index 87fe625..8bf0359 100644
--- a/bolt/lib/Passes/VeneerElimination.cpp
+++ b/bolt/lib/Passes/VeneerElimination.cpp
@@ -73,12 +73,12 @@ Error VeneerElimination::runOnFunctions(BinaryContext &BC) {
continue;
const MCSymbol *TargetSymbol = BC.MIB->getTargetSymbol(Instr, 0);
- if (VeneerDestinations.find(TargetSymbol) == VeneerDestinations.end())
+ auto It = VeneerDestinations.find(TargetSymbol);
+ if (It == VeneerDestinations.end())
continue;
VeneerCallers++;
- BC.MIB->replaceBranchTarget(Instr, VeneerDestinations[TargetSymbol],
- BC.Ctx.get());
+ BC.MIB->replaceBranchTarget(Instr, It->second, BC.Ctx.get());
}
}
}
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index f9cb1b3..1b5ba8b 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -1362,7 +1362,7 @@ void DWARFRewriter::updateDWARFObjectAddressRanges(
Die.getTag() == dwarf::DW_TAG_compile_unit)) {
if (opts::Verbosity >= 1)
errs() << "BOLT-WARNING: cannot update ranges for DIE in Unit offset 0x"
- << Unit.getOffset() << '\n';
+ << Twine::utohexstr(Unit.getOffset()) << '\n';
}
}
diff --git a/clang-tools-extra/clang-tidy/modernize/UseStartsEndsWithCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseStartsEndsWithCheck.cpp
index 5eb3267..1231f954 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseStartsEndsWithCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseStartsEndsWithCheck.cpp
@@ -9,7 +9,8 @@
#include "UseStartsEndsWithCheck.h"
#include "../utils/ASTUtils.h"
-#include "../utils/OptionsUtils.h"
+#include "../utils/Matchers.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Lex/Lexer.h"
#include <string>
@@ -82,60 +83,53 @@ UseStartsEndsWithCheck::UseStartsEndsWithCheck(StringRef Name,
void UseStartsEndsWithCheck::registerMatchers(MatchFinder *Finder) {
const auto ZeroLiteral = integerLiteral(equals(0));
- const auto HasStartsWithMethodWithName = [](const std::string &Name) {
- return hasMethod(
- cxxMethodDecl(hasName(Name), isConst(), parameterCountIs(1))
- .bind("starts_with_fun"));
+ const auto ClassTypeWithMethod = [](const StringRef MethodBoundName,
+ const auto... Methods) {
+ return cxxRecordDecl(anyOf(
+ hasMethod(cxxMethodDecl(isConst(), parameterCountIs(1),
+ returns(booleanType()), hasAnyName(Methods))
+ .bind(MethodBoundName))...));
};
- const auto HasStartsWithMethod =
- anyOf(HasStartsWithMethodWithName("starts_with"),
- HasStartsWithMethodWithName("startsWith"),
- HasStartsWithMethodWithName("startswith"));
+
const auto OnClassWithStartsWithFunction =
- on(hasType(hasCanonicalType(hasDeclaration(cxxRecordDecl(
- anyOf(HasStartsWithMethod,
- hasAnyBase(hasType(hasCanonicalType(
- hasDeclaration(cxxRecordDecl(HasStartsWithMethod)))))))))));
-
- const auto HasEndsWithMethodWithName = [](const std::string &Name) {
- return hasMethod(
- cxxMethodDecl(hasName(Name), isConst(), parameterCountIs(1))
- .bind("ends_with_fun"));
- };
- const auto HasEndsWithMethod = anyOf(HasEndsWithMethodWithName("ends_with"),
- HasEndsWithMethodWithName("endsWith"),
- HasEndsWithMethodWithName("endswith"));
- const auto OnClassWithEndsWithFunction =
- on(expr(hasType(hasCanonicalType(hasDeclaration(cxxRecordDecl(
- anyOf(HasEndsWithMethod,
- hasAnyBase(hasType(hasCanonicalType(hasDeclaration(
- cxxRecordDecl(HasEndsWithMethod)))))))))))
- .bind("haystack"));
+ ClassTypeWithMethod("starts_with_fun", "starts_with", "startsWith",
+ "startswith", "StartsWith");
+
+ const auto OnClassWithEndsWithFunction = ClassTypeWithMethod(
+ "ends_with_fun", "ends_with", "endsWith", "endswith", "EndsWith");
// Case 1: X.find(Y) [!=]= 0 -> starts_with.
const auto FindExpr = cxxMemberCallExpr(
anyOf(argumentCountIs(1), hasArgument(1, ZeroLiteral)),
- callee(cxxMethodDecl(hasName("find")).bind("find_fun")),
- OnClassWithStartsWithFunction, hasArgument(0, expr().bind("needle")));
+ callee(
+ cxxMethodDecl(hasName("find"), ofClass(OnClassWithStartsWithFunction))
+ .bind("find_fun")),
+ hasArgument(0, expr().bind("needle")));
// Case 2: X.rfind(Y, 0) [!=]= 0 -> starts_with.
const auto RFindExpr = cxxMemberCallExpr(
hasArgument(1, ZeroLiteral),
- callee(cxxMethodDecl(hasName("rfind")).bind("find_fun")),
- OnClassWithStartsWithFunction, hasArgument(0, expr().bind("needle")));
+ callee(cxxMethodDecl(hasName("rfind"),
+ ofClass(OnClassWithStartsWithFunction))
+ .bind("find_fun")),
+ hasArgument(0, expr().bind("needle")));
// Case 3: X.compare(0, LEN(Y), Y) [!=]= 0 -> starts_with.
const auto CompareExpr = cxxMemberCallExpr(
argumentCountIs(3), hasArgument(0, ZeroLiteral),
- callee(cxxMethodDecl(hasName("compare")).bind("find_fun")),
- OnClassWithStartsWithFunction, hasArgument(2, expr().bind("needle")),
+ callee(cxxMethodDecl(hasName("compare"),
+ ofClass(OnClassWithStartsWithFunction))
+ .bind("find_fun")),
+ hasArgument(2, expr().bind("needle")),
hasArgument(1, lengthExprForStringNode("needle")));
// Case 4: X.compare(LEN(X) - LEN(Y), LEN(Y), Y) [!=]= 0 -> ends_with.
const auto CompareEndsWithExpr = cxxMemberCallExpr(
argumentCountIs(3),
- callee(cxxMethodDecl(hasName("compare")).bind("find_fun")),
- OnClassWithEndsWithFunction, hasArgument(2, expr().bind("needle")),
+ callee(cxxMethodDecl(hasName("compare"),
+ ofClass(OnClassWithEndsWithFunction))
+ .bind("find_fun")),
+ on(expr().bind("haystack")), hasArgument(2, expr().bind("needle")),
hasArgument(1, lengthExprForStringNode("needle")),
hasArgument(0,
binaryOperator(hasOperatorName("-"),
@@ -145,7 +139,7 @@ void UseStartsEndsWithCheck::registerMatchers(MatchFinder *Finder) {
// All cases comparing to 0.
Finder->addMatcher(
binaryOperator(
- hasAnyOperatorName("==", "!="),
+ matchers::isEqualityOperator(),
hasOperands(cxxMemberCallExpr(anyOf(FindExpr, RFindExpr, CompareExpr,
CompareEndsWithExpr))
.bind("find_expr"),
@@ -156,7 +150,7 @@ void UseStartsEndsWithCheck::registerMatchers(MatchFinder *Finder) {
// Case 5: X.rfind(Y) [!=]= LEN(X) - LEN(Y) -> ends_with.
Finder->addMatcher(
binaryOperator(
- hasAnyOperatorName("==", "!="),
+ matchers::isEqualityOperator(),
hasOperands(
cxxMemberCallExpr(
anyOf(
@@ -166,8 +160,10 @@ void UseStartsEndsWithCheck::registerMatchers(MatchFinder *Finder) {
1,
anyOf(declRefExpr(to(varDecl(hasName("npos")))),
memberExpr(member(hasName("npos"))))))),
- callee(cxxMethodDecl(hasName("rfind")).bind("find_fun")),
- OnClassWithEndsWithFunction,
+ callee(cxxMethodDecl(hasName("rfind"),
+ ofClass(OnClassWithEndsWithFunction))
+ .bind("find_fun")),
+ on(expr().bind("haystack")),
hasArgument(0, expr().bind("needle")))
.bind("find_expr"),
binaryOperator(hasOperatorName("-"),
@@ -190,9 +186,8 @@ void UseStartsEndsWithCheck::check(const MatchFinder::MatchResult &Result) {
const CXXMethodDecl *ReplacementFunction =
StartsWithFunction ? StartsWithFunction : EndsWithFunction;
- if (ComparisonExpr->getBeginLoc().isMacroID()) {
+ if (ComparisonExpr->getBeginLoc().isMacroID())
return;
- }
const bool Neg = ComparisonExpr->getOpcode() == BO_NE;
@@ -220,9 +215,8 @@ void UseStartsEndsWithCheck::check(const MatchFinder::MatchResult &Result) {
(ReplacementFunction->getName() + "(").str());
// Add possible negation '!'.
- if (Neg) {
+ if (Neg)
Diagnostic << FixItHint::CreateInsertion(FindExpr->getBeginLoc(), "!");
- }
}
} // namespace clang::tidy::modernize
diff --git a/clang-tools-extra/clangd/test/log.test b/clang-tools-extra/clangd/test/log.test
index 7a53d36..5cc8719 100644
--- a/clang-tools-extra/clangd/test/log.test
+++ b/clang-tools-extra/clangd/test/log.test
@@ -1,7 +1,7 @@
# RUN: env CLANGD_FLAGS=-compile-commands-dir=no-such-dir not clangd -lit-test </dev/null 2>&1 >/dev/null | FileCheck %s
CHECK: I[{{.*}}]{{.*}} clangd version {{.*}}
CHECK: Working directory: {{.*}}
-CHECK: argv[0]: clangd
+CHECK: argv[0]: {{.*}}clangd
CHECK: argv[1]: -lit-test
CHECK: CLANGD_FLAGS: -compile-commands-dir=no-such-dir
CHECK: E[{{.*}}] Path specified by --compile-commands-dir does not exist.
diff --git a/clang-tools-extra/test/clang-query/invalid-command-line.cpp b/clang-tools-extra/test/clang-query/invalid-command-line.cpp
index e3e8af1..a66acc8 100644
--- a/clang-tools-extra/test/clang-query/invalid-command-line.cpp
+++ b/clang-tools-extra/test/clang-query/invalid-command-line.cpp
@@ -1,4 +1,4 @@
// RUN: not clang-query --invalid-arg 2>&1 | FileCheck %s
-// CHECK: error: clang-query{{(\.exe)?}}: Unknown command line argument '--invalid-arg'. Try: 'clang-query{{(\.exe)?}} --help'
+// CHECK: error: clang-query{{(\.exe)?}}: Unknown command line argument '--invalid-arg'. Try: '{{.*}}clang-query{{(\.exe)?}} --help'
// CHECK-NEXT: clang-query{{(\.exe)?}}: Did you mean '--extra-arg'?
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-starts-ends-with.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-starts-ends-with.cpp
index 798af26..9147724 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-starts-ends-with.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-starts-ends-with.cpp
@@ -32,14 +32,9 @@ struct prefer_underscore_version_flip {
size_t find(const char *s, size_t pos = 0) const;
};
-struct prefer_underscore_version_inherit : public string_like {
- bool startsWith(const char *s) const;
-};
-
void test(std::string s, std::string_view sv, sub_string ss, sub_sub_string sss,
string_like sl, string_like_camel slc, prefer_underscore_version puv,
- prefer_underscore_version_flip puvf,
- prefer_underscore_version_inherit puvi) {
+ prefer_underscore_version_flip puvf) {
s.find("a") == 0;
// CHECK-MESSAGES: :[[@LINE-1]]:{{[0-9]+}}: warning: use starts_with instead of find() == 0
// CHECK-FIXES: s.starts_with("a");
@@ -153,12 +148,6 @@ void test(std::string s, std::string_view sv, sub_string ss, sub_sub_string sss,
// CHECK-MESSAGES: :[[@LINE-1]]:{{[0-9]+}}: warning: use starts_with
// CHECK-FIXES: puvf.starts_with("a");
- // Here, the subclass has startsWith, the superclass has starts_with.
- // We prefer the version from the subclass.
- puvi.find("a") == 0;
- // CHECK-MESSAGES: :[[@LINE-1]]:{{[0-9]+}}: warning: use startsWith
- // CHECK-FIXES: puvi.startsWith("a");
-
s.compare(0, 1, "a") == 0;
// CHECK-MESSAGES: :[[@LINE-1]]:{{[0-9]+}}: warning: use starts_with instead of compare() == 0
// CHECK-FIXES: s.starts_with("a");
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/invalid-command-line.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/invalid-command-line.cpp
index c06b09d..4bdca50 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/invalid-command-line.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/invalid-command-line.cpp
@@ -1,4 +1,4 @@
// RUN: not clang-tidy --invalid-arg 2>&1 | FileCheck %s
-// CHECK: error: clang-tidy{{(\.exe)?}}: Unknown command line argument '--invalid-arg'. Try: 'clang-tidy{{(\.exe)?}} --help'
+// CHECK: error: clang-tidy{{(\.exe)?}}: Unknown command line argument '--invalid-arg'. Try: '{{.*}}clang-tidy{{(\.exe)?}} --help'
// CHECK-NEXT: clang-tidy{{(\.exe)?}}: Did you mean '--extra-arg'?
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index 8add0a53..f36a547 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -5505,6 +5505,31 @@ the configuration (without a prefix: ``Auto``).
}
}
+.. _RemoveEmptyLinesInUnwrappedLines:
+
+**RemoveEmptyLinesInUnwrappedLines** (``Boolean``) :versionbadge:`clang-format 20` :ref:`¶ <RemoveEmptyLinesInUnwrappedLines>`
+ Remove empty lines within unwrapped lines.
+
+ .. code-block:: c++
+
+ false: true:
+
+ int c vs. int c = a + b;
+
+ = a + b;
+
+ enum : unsigned vs. enum : unsigned {
+ AA = 0,
+ { BB
+ AA = 0, } myEnum;
+ BB
+ } myEnum;
+
+ while ( vs. while (true) {
+ }
+ true) {
+ }
+
.. _RemoveParentheses:
**RemoveParentheses** (``RemoveParenthesesStyle``) :versionbadge:`clang-format 17` :ref:`¶ <RemoveParentheses>`
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 1da8c82..b7a6ace 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -272,6 +272,7 @@ Non-comprehensive list of changes in this release
``__builtin_signbit`` can now be used in constant expressions.
- Plugins can now define custom attributes that apply to statements
as well as declarations.
+- ``__builtin_abs`` function can now be used in constant expressions.
New Compiler Flags
------------------
@@ -418,7 +419,7 @@ Improvements to Clang's diagnostics
- The warning for an unsupported type for a named register variable is now phrased ``unsupported type for named register variable``,
instead of ``bad type for named register variable``. This makes it clear that the type is not supported at all, rather than being
suboptimal in some way the error fails to mention (#GH111550).
-
+
- Clang now emits a ``-Wdepredcated-literal-operator`` diagnostic, even if the
name was a reserved name, which we improperly allowed to suppress the
diagnostic.
@@ -537,6 +538,7 @@ Bug Fixes to C++ Support
certain situations. (#GH47400), (#GH90896)
- Fix erroneous templated array size calculation leading to crashes in generated code. (#GH41441)
- During the lookup for a base class name, non-type names are ignored. (#GH16855)
+- Fix a crash when recovering an invalid expression involving an explicit object member conversion operator. (#GH112559)
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -699,8 +701,10 @@ clang-format
- Adds ``BreakBinaryOperations`` option.
- Adds ``TemplateNames`` option.
- Adds ``AlignFunctionDeclarations`` option to ``AlignConsecutiveDeclarations``.
-- Adds ``IndentOnly`` suboption to ``ReflowComments`` to fix the indentation of multi-line comments
- without touching their contents, renames ``false`` to ``Never``, and ``true`` to ``Always``.
+- Adds ``IndentOnly`` suboption to ``ReflowComments`` to fix the indentation of
+ multi-line comments without touching their contents, renames ``false`` to
+ ``Never``, and ``true`` to ``Always``.
+- Adds ``RemoveEmptyLinesInUnwrappedLines`` option.
libclang
--------
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 4f99bf4..0c5ac80 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2980,7 +2980,7 @@ enum CXTypeKind {
CXType_Atomic = 177,
CXType_BTFTagAttributed = 178,
- // HLSL Types
+ /* HLSL Types */
CXType_HLSLResource = 179,
CXType_HLSLAttributedResource = 180
};
diff --git a/clang/include/clang/Basic/AArch64SVEACLETypes.def b/clang/include/clang/Basic/AArch64SVEACLETypes.def
index 72df1e3..2b80e43 100644
--- a/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -138,6 +138,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x2_t", "svfloat64x2_t", SveFloat64x2, Sv
SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x2_t", "svbfloat16x2_t", SveBFloat16x2, SveBFloat16x2Ty, 8, 16, 2)
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x2_t", "svmfloat8x2_t", SveMFloat8x2, SveMFloat8x2Ty, 16, 8, 2, false)
+
//
// x3
//
@@ -158,6 +160,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x3_t", "svfloat64x3_t", SveFloat64x3, Sv
SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x3_t", "svbfloat16x3_t", SveBFloat16x3, SveBFloat16x3Ty, 8, 16, 3)
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x3_t", "svmfloat8x3_t", SveMFloat8x3, SveMFloat8x3Ty, 16, 8, 3, false)
+
//
// x4
//
@@ -178,6 +182,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, Sv
SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 8, 16, 4)
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x4_t", "svmfloat8x4_t", SveMFloat8x4, SveMFloat8x4Ty, 16, 8, 4, false)
+
SVE_PREDICATE_TYPE_ALL("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16, 1)
SVE_PREDICATE_TYPE_ALL("__clang_svboolx2_t", "svboolx2_t", SveBoolx2, SveBoolx2Ty, 16, 2)
SVE_PREDICATE_TYPE_ALL("__clang_svboolx4_t", "svboolx4_t", SveBoolx4, SveBoolx4Ty, 16, 4)
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 382fb6b..90475a3 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -2714,6 +2714,7 @@ def Abs : IntMathTemplate, LibBuiltin<"stdlib.h"> {
let Attributes = [NoThrow, Const];
let Prototype = "T(T)";
let AddBuiltinPrefixedAlias = 1;
+ let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
}
def Calloc : LibBuiltin<"stdlib.h"> {
diff --git a/clang/include/clang/Basic/StackExhaustionHandler.h b/clang/include/clang/Basic/StackExhaustionHandler.h
new file mode 100644
index 0000000..fb02b95
--- /dev/null
+++ b/clang/include/clang/Basic/StackExhaustionHandler.h
@@ -0,0 +1,45 @@
+//===--- StackExhaustionHandler.h - Stack exhaustion handler ----*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines a utility for warning once when close to out of stack space.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_STACK_EXHAUSTION_HANDLER_H
+#define LLVM_CLANG_BASIC_STACK_EXHAUSTION_HANDLER_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+class StackExhaustionHandler {
+public:
+ StackExhaustionHandler(DiagnosticsEngine &diags) : DiagsRef(diags) {}
+
+ /// Run some code with "sufficient" stack space. (Currently, at least 256K
+ /// is guaranteed). Produces a warning if we're low on stack space and
+ /// allocates more in that case. Use this in code that may recurse deeply to
+ /// avoid stack overflow.
+ void runWithSufficientStackSpace(SourceLocation Loc,
+ llvm::function_ref<void()> Fn);
+
+  /// Check to see if we're low on stack space and produce a warning if so
+  /// (currently, at least 256K is guaranteed).
+ void warnOnStackNearlyExhausted(SourceLocation Loc);
+
+private:
+ /// Warn that the stack is nearly exhausted.
+ void warnStackExhausted(SourceLocation Loc);
+
+ DiagnosticsEngine &DiagsRef;
+ bool WarnedStackExhausted = false;
+};
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_STACK_EXHAUSTION_HANDLER_H
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 4eb013d..152c43d 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -3454,7 +3454,8 @@ def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>,
def fstruct_path_tbaa : Flag<["-"], "fstruct-path-tbaa">, Group<f_Group>;
def fno_struct_path_tbaa : Flag<["-"], "fno-struct-path-tbaa">, Group<f_Group>;
def fno_strict_enums : Flag<["-"], "fno-strict-enums">, Group<f_Group>;
-def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>;
+def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>;
def fno_pointer_tbaa : Flag<["-"], "fno-pointer-tbaa">, Group<f_Group>;
def fno_temp_file : Flag<["-"], "fno-temp-file">, Group<f_Group>,
Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>, HelpText<
@@ -3470,7 +3471,8 @@ def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>,
Visibility<[ClangOption, CC1Option]>,
MarshallingInfoNegativeFlag<CodeGenOpts<"AsmVerbose">>;
def fno_working_directory : Flag<["-"], "fno-working-directory">, Group<f_Group>;
-def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
+def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>;
def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>,
Visibility<[ClangOption, CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
@@ -3966,7 +3968,8 @@ defm strict_vtable_pointers : BoolFOption<"strict-vtable-pointers",
"Enable optimizations based on the strict rules for"
" overwriting polymorphic C++ objects">,
NegFlag<SetFalse>>;
-def fstrict_overflow : Flag<["-"], "fstrict-overflow">, Group<f_Group>;
+def fstrict_overflow : Flag<["-"], "fstrict-overflow">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>;
def fpointer_tbaa : Flag<["-"], "fpointer-tbaa">, Group<f_Group>;
def fdriver_only : Flag<["-"], "fdriver-only">, Flags<[NoXarchOption]>,
Visibility<[ClangOption, CLOption, DXCOption]>,
@@ -4235,7 +4238,7 @@ defm virtual_function_elimination : BoolFOption<"virtual-function-elimination",
NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>,
- Visibility<[ClangOption, CC1Option]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Treat signed integer overflow as two's complement">;
def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>,
Visibility<[ClangOption, CC1Option]>,
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index a0762b0..debba1c 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -3938,6 +3938,29 @@ struct FormatStyle {
/// \version 14
bool RemoveBracesLLVM;
+ /// Remove empty lines within unwrapped lines.
+ /// \code
+ /// false: true:
+ ///
+ /// int c vs. int c = a + b;
+ ///
+ /// = a + b;
+ ///
+ /// enum : unsigned vs. enum : unsigned {
+ /// AA = 0,
+ /// { BB
+ /// AA = 0, } myEnum;
+ /// BB
+ /// } myEnum;
+ ///
+ /// while ( vs. while (true) {
+ /// }
+ /// true) {
+ /// }
+ /// \endcode
+ /// \version 20
+ bool RemoveEmptyLinesInUnwrappedLines;
+
/// Types of redundant parentheses to remove.
enum RemoveParenthesesStyle : int8_t {
/// Do not remove parentheses.
@@ -5232,6 +5255,8 @@ struct FormatStyle {
RawStringFormats == R.RawStringFormats &&
ReferenceAlignment == R.ReferenceAlignment &&
RemoveBracesLLVM == R.RemoveBracesLLVM &&
+ RemoveEmptyLinesInUnwrappedLines ==
+ R.RemoveEmptyLinesInUnwrappedLines &&
RemoveParentheses == R.RemoveParentheses &&
RemoveSemicolon == R.RemoveSemicolon &&
RequiresClausePosition == R.RequiresClausePosition &&
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 2c5769f..bc9c422 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -49,6 +49,7 @@
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/StackExhaustionHandler.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/TypeTraits.h"
@@ -546,9 +547,6 @@ public:
/// Print out statistics about the semantic analysis.
void PrintStats() const;
- /// Warn that the stack is nearly exhausted.
- void warnStackExhausted(SourceLocation Loc);
-
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
@@ -1183,7 +1181,7 @@ private:
std::optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
bool WarnedDarwinSDKInfoMissing = false;
- bool WarnedStackExhausted = false;
+ StackExhaustionHandler StackHandler;
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index d735e2d..e397dff 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -1149,7 +1149,7 @@ enum PredefinedTypeIDs {
///
/// Type IDs for non-predefined types will start at
/// NUM_PREDEF_TYPE_IDs.
-const unsigned NUM_PREDEF_TYPE_IDS = 506;
+const unsigned NUM_PREDEF_TYPE_IDS = 509;
// Ensure we do not overrun the predefined types we reserved
// in the enum PredefinedTypeIDs above.
diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h
index ee4e897..b476a40 100644
--- a/clang/include/clang/Serialization/ASTReader.h
+++ b/clang/include/clang/Serialization/ASTReader.h
@@ -19,6 +19,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/StackExhaustionHandler.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderSearch.h"
@@ -445,7 +446,7 @@ private:
DiagnosticsEngine &Diags;
// Sema has duplicate logic, but SemaObj can sometimes be null so ASTReader
// has its own version.
- bool WarnedStackExhausted = false;
+ StackExhaustionHandler StackHandler;
/// The semantic analysis object that will be processing the
/// AST files and the translation unit that uses it.
@@ -2180,7 +2181,8 @@ public:
/// Report a diagnostic.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) const;
- void warnStackExhausted(SourceLocation Loc);
+ void runWithSufficientStackSpace(SourceLocation Loc,
+ llvm::function_ref<void()> Fn);
IdentifierInfo *DecodeIdentifierInfo(serialization::IdentifierID ID);
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 020a2f3..e7a6509 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -362,24 +362,24 @@ namespace clang {
template <typename TemplateParmDeclT>
Error importTemplateParameterDefaultArgument(const TemplateParmDeclT *D,
TemplateParmDeclT *ToD) {
- Error Err = Error::success();
if (D->hasDefaultArgument()) {
if (D->defaultArgumentWasInherited()) {
- auto *ToInheritedFrom = const_cast<TemplateParmDeclT *>(
- importChecked(Err, D->getDefaultArgStorage().getInheritedFrom()));
- if (Err)
- return Err;
+ Expected<TemplateParmDeclT *> ToInheritedFromOrErr =
+ import(D->getDefaultArgStorage().getInheritedFrom());
+ if (!ToInheritedFromOrErr)
+ return ToInheritedFromOrErr.takeError();
+ TemplateParmDeclT *ToInheritedFrom = *ToInheritedFromOrErr;
if (!ToInheritedFrom->hasDefaultArgument()) {
// Resolve possible circular dependency between default value of the
// template argument and the template declaration.
- const auto ToInheritedDefaultArg =
- importChecked(Err, D->getDefaultArgStorage()
- .getInheritedFrom()
- ->getDefaultArgument());
- if (Err)
- return Err;
+ Expected<TemplateArgumentLoc> ToInheritedDefaultArgOrErr =
+ import(D->getDefaultArgStorage()
+ .getInheritedFrom()
+ ->getDefaultArgument());
+ if (!ToInheritedDefaultArgOrErr)
+ return ToInheritedDefaultArgOrErr.takeError();
ToInheritedFrom->setDefaultArgument(Importer.getToContext(),
- ToInheritedDefaultArg);
+ *ToInheritedDefaultArgOrErr);
}
ToD->setInheritedDefaultArgument(ToD->getASTContext(),
ToInheritedFrom);
@@ -395,7 +395,7 @@ namespace clang {
*ToDefaultArgOrErr);
}
}
- return Err;
+ return Error::success();
}
public:
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 8ca63bf..672fa7f 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -4132,10 +4132,16 @@ template <class Emitter>
bool Compiler<Emitter>::visitExpr(const Expr *E, bool DestroyToplevelScope) {
LocalScope<Emitter> RootScope(this);
+ // If we won't destroy the toplevel scope, check for memory leaks first.
+ if (!DestroyToplevelScope) {
+ if (!this->emitCheckAllocations(E))
+ return false;
+ }
+
auto maybeDestroyLocals = [&]() -> bool {
if (DestroyToplevelScope)
- return RootScope.destroyLocals();
- return true;
+ return RootScope.destroyLocals() && this->emitCheckAllocations(E);
+ return this->emitCheckAllocations(E);
};
// Void expressions.
@@ -4171,8 +4177,7 @@ bool Compiler<Emitter>::visitExpr(const Expr *E, bool DestroyToplevelScope) {
return this->emitRetValue(E) && maybeDestroyLocals();
}
- (void)maybeDestroyLocals();
- return false;
+ return maybeDestroyLocals() && this->emitCheckAllocations(E) && false;
}
template <class Emitter>
@@ -4214,7 +4219,8 @@ bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD,
DeclScope<Emitter> LS(this, VD);
if (!this->visit(VD->getAnyInitializer()))
return false;
- return this->emitRet(VarT.value_or(PT_Ptr), VD) && LS.destroyLocals();
+ return this->emitRet(VarT.value_or(PT_Ptr), VD) && LS.destroyLocals() &&
+ this->emitCheckAllocations(VD);
}
LocalScope<Emitter> VDScope(this, VD);
@@ -4260,7 +4266,7 @@ bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD,
return false;
}
- return VDScope.destroyLocals();
+ return VDScope.destroyLocals() && this->emitCheckAllocations(VD);
}
template <class Emitter>
@@ -4535,6 +4541,10 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
return VisitBuiltinCallExpr(E, Builtin::BI__builtin_operator_delete);
}
}
+ // Explicit calls to trivial destructors
+ if (const auto *DD = dyn_cast_if_present<CXXDestructorDecl>(FuncDecl);
+ DD && DD->isTrivial())
+ return true;
QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
std::optional<PrimType> T = classify(ReturnType);
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 9bca813..7088cf0 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -78,8 +78,7 @@ bool Context::evaluate(State &Parent, const Expr *E, APValue &Result,
Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
auto Res = C.interpretExpr(E, /*ConvertResultToRValue=*/false,
- /*DestroyToplevelScope=*/Kind ==
- ConstantExprKind::ClassTemplateArgument);
+ /*DestroyToplevelScope=*/true);
if (Res.isInvalid()) {
C.cleanup();
Stk.clearTo(StackSizeBefore);
diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp
index 7eecee2..65ad960 100644
--- a/clang/lib/AST/ByteCode/EvalEmitter.cpp
+++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp
@@ -132,17 +132,10 @@ bool EvalEmitter::fallthrough(const LabelTy &Label) {
return true;
}
-static bool checkReturnState(InterpState &S) {
- return S.maybeDiagnoseDanglingAllocations();
-}
-
template <PrimType OpType> bool EvalEmitter::emitRet(const SourceInfo &Info) {
if (!isActive())
return true;
- if (!checkReturnState(S))
- return false;
-
using T = typename PrimConv<OpType>::T;
EvalResult.setValue(S.Stk.pop<T>().toAPValue(Ctx.getASTContext()));
return true;
@@ -159,9 +152,6 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr))
return false;
- if (!checkReturnState(S))
- return false;
-
// Implicitly convert lvalue to rvalue, if requested.
if (ConvertResultToRValue) {
if (!Ptr.isZero() && !Ptr.isDereferencable())
@@ -194,16 +184,12 @@ template <> bool EvalEmitter::emitRet<PT_FnPtr>(const SourceInfo &Info) {
if (!isActive())
return true;
- if (!checkReturnState(S))
- return false;
// Function pointers cannot be converted to rvalues.
EvalResult.setFunctionPointer(S.Stk.pop<FunctionPointer>());
return true;
}
bool EvalEmitter::emitRetVoid(const SourceInfo &Info) {
- if (!checkReturnState(S))
- return false;
EvalResult.setValid();
return true;
}
@@ -216,9 +202,6 @@ bool EvalEmitter::emitRetValue(const SourceInfo &Info) {
if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr))
return false;
- if (!checkReturnState(S))
- return false;
-
if (std::optional<APValue> APV =
Ptr.toRValue(S.getASTContext(), EvalResult.getSourceType())) {
EvalResult.setValue(*APV);
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index a1a9256..aafc848 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -1841,6 +1841,7 @@ bool Init(InterpState &S, CodePtr OpPC) {
assert(false);
return false;
}
+ Ptr.activate();
Ptr.initialize();
new (&Ptr.deref<T>()) T(Value);
return true;
@@ -1852,6 +1853,7 @@ bool InitPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckInit(S, OpPC, Ptr))
return false;
+ Ptr.activate();
Ptr.initialize();
new (&Ptr.deref<T>()) T(Value);
return true;
@@ -3005,6 +3007,10 @@ static inline bool IsConstantContext(InterpState &S, CodePtr OpPC) {
return true;
}
+static inline bool CheckAllocations(InterpState &S, CodePtr OpPC) {
+ return S.maybeDiagnoseDanglingAllocations();
+}
+
/// Check if the initializer and storage types of a placement-new expression
/// match.
bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 65c7b4e..d4a8e6c 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -563,6 +563,20 @@ static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *Func,
+ const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = peekToAPSInt(S.Stk, ArgT);
+ if (Val ==
+ APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
+ return false;
+ if (Val.isNegative())
+ Val.negate();
+ pushInteger(S, Val, Call->getType());
+ return true;
+}
+
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const Function *Func,
@@ -1808,6 +1822,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
return false;
break;
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs:
+ if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll:
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index 4fa9b6d..a1970f2 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -836,3 +836,4 @@ def CheckNewTypeMismatchArray : Opcode {
}
def IsConstantContext: Opcode;
+def CheckAllocations : Opcode;
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index f083fff..8321cee 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -2512,7 +2512,8 @@ bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const {
if (!DefVD->mightBeUsableInConstantExpressions(Context))
return false;
// ... and its initializer is a constant initializer.
- if (Context.getLangOpts().CPlusPlus && !DefVD->hasConstantInitialization())
+ if ((Context.getLangOpts().CPlusPlus || getLangOpts().C23) &&
+ !DefVD->hasConstantInitialization())
return false;
// C++98 [expr.const]p1:
// An integral constant-expression can involve only [...] const variables
@@ -2619,8 +2620,11 @@ bool VarDecl::hasICEInitializer(const ASTContext &Context) const {
}
bool VarDecl::hasConstantInitialization() const {
- // In C, all globals (and only globals) have constant initialization.
- if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus)
+ // In C, all globals and constexpr variables should have constant
+ // initialization. For constexpr variables in C check that initializer is a
+ // constant initializer because they can be used in constant expressions.
+ if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus &&
+ !isConstexpr())
return true;
// In C++, it depends on whether the evaluation at the point of definition
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 9ecbf12..66db626 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -1989,7 +1989,7 @@ Expr *CastExpr::getSubExprAsWritten() {
SubExpr = IgnoreExprNodes(cast<CXXConstructExpr>(SubExpr)->getArg(0),
ignoreImplicitSemaNodes);
} else if (E->getCastKind() == CK_UserDefinedConversion) {
- assert((isa<CXXMemberCallExpr>(SubExpr) || isa<BlockExpr>(SubExpr)) &&
+ assert((isa<CallExpr, BlockExpr>(SubExpr)) &&
"Unexpected SubExpr for CK_UserDefinedConversion.");
if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr))
SubExpr = MCE->getImplicitObjectArgument();
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 8544052..8e36cad 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -13098,6 +13098,20 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val.popcount() % 2, E);
}
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+ if (Val == APSInt(APInt::getSignedMinValue(Val.getBitWidth()),
+ /*IsUnsigned=*/false))
+ return false;
+ if (Val.isNegative())
+ Val.negate();
+ return Success(Val, E);
+ }
+
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll:
diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt
index e7ebc8f..e11e1ac 100644
--- a/clang/lib/Basic/CMakeLists.txt
+++ b/clang/lib/Basic/CMakeLists.txt
@@ -89,6 +89,7 @@ add_clang_library(clangBasic
SourceManager.cpp
SourceMgrAdapter.cpp
Stack.cpp
+ StackExhaustionHandler.cpp
TargetID.cpp
TargetInfo.cpp
Targets.cpp
diff --git a/clang/lib/Basic/StackExhaustionHandler.cpp b/clang/lib/Basic/StackExhaustionHandler.cpp
new file mode 100644
index 0000000..24b499c
--- /dev/null
+++ b/clang/lib/Basic/StackExhaustionHandler.cpp
@@ -0,0 +1,35 @@
+//===--- StackExhaustionHandler.cpp - - A utility for warning once when close
+// to out of stack space -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines a utility for warning once when close to out of stack space.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/StackExhaustionHandler.h"
+#include "clang/Basic/Stack.h"
+
+void clang::StackExhaustionHandler::runWithSufficientStackSpace(
+ SourceLocation Loc, llvm::function_ref<void()> Fn) {
+ clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
+}
+
+void clang::StackExhaustionHandler::warnOnStackNearlyExhausted(
+ SourceLocation Loc) {
+ if (isStackNearlyExhausted())
+ warnStackExhausted(Loc);
+}
+
+void clang::StackExhaustionHandler::warnStackExhausted(SourceLocation Loc) {
+ // Only warn about this once.
+ if (!WarnedStackExhausted) {
+ DiagsRef.Report(Loc, diag::warn_stack_exhausted);
+ WarnedStackExhausted = true;
+ }
+}
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 870f0f3..eaaba76 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -100,6 +100,14 @@ bool RISCVTargetInfo::validateAsmConstraint(
case 'S': // A symbol or label reference with a constant offset
Info.setAllowsRegister();
return true;
+ case 'c':
+ // A RVC register - GPR or FPR
+ if (Name[1] == 'r' || Name[1] == 'f') {
+ Info.setAllowsRegister();
+ Name += 1;
+ return true;
+ }
+ return false;
case 'v':
// A vector register.
if (Name[1] == 'r' || Name[1] == 'd' || Name[1] == 'm') {
@@ -114,6 +122,8 @@ bool RISCVTargetInfo::validateAsmConstraint(
std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
std::string R;
switch (*Constraint) {
+ // c* and v* are two-letter constraints on RISC-V.
+ case 'c':
case 'v':
R = std::string("^") + std::string(Constraint, 2);
Constraint += 1;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a048a56..28f28c7 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5636,10 +5636,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
+ Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
return RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, BCast, PacketSize, PacketAlign}));
+ {Arg0, ACast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 9a84a11..24655b8 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -342,7 +342,7 @@ CodeGenModule::CodeGenModule(ASTContext &C,
: Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
- VMContext(M.getContext()), VTables(*this),
+ VMContext(M.getContext()), VTables(*this), StackHandler(diags),
SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
@@ -1595,17 +1595,9 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
-void CodeGenModule::warnStackExhausted(SourceLocation Loc) {
- // Only warn about this once.
- if (!WarnedStackExhausted) {
- getDiags().Report(Loc, diag::warn_stack_exhausted);
- WarnedStackExhausted = true;
- }
-}
-
void CodeGenModule::runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn) {
- clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
+ StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index fa82a81..1b77490 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -26,6 +26,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ProfileList.h"
+#include "clang/Basic/StackExhaustionHandler.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "clang/Lex/PreprocessorOptions.h"
@@ -336,7 +337,7 @@ private:
std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader;
InstrProfStats PGOStats;
std::unique_ptr<llvm::SanitizerStatReport> SanStats;
- bool WarnedStackExhausted = false;
+ StackExhaustionHandler StackHandler;
// A set of references that have only been seen via a weakref so far. This is
// used to remove the weak of the reference if we ever see a direct reference
@@ -1298,9 +1299,6 @@ public:
/// Print out an error that codegen doesn't support the specified decl yet.
void ErrorUnsupported(const Decl *D, const char *Type);
- /// Warn that the stack is nearly exhausted.
- void warnStackExhausted(SourceLocation Loc);
-
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply to avoid stack
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 6c2a6f9..89f9457 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -3437,7 +3437,7 @@ class ItaniumRTTIBuilder {
llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
/// BuildVTablePointer - Build the vtable pointer for the given type.
- void BuildVTablePointer(const Type *Ty);
+ void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
@@ -3834,7 +3834,8 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
return true;
}
-void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
+void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
+ llvm::Constant *StorageAddress) {
// abi::__class_type_info.
static const char * const ClassTypeInfo =
"_ZTVN10__cxxabiv117__class_type_infoE";
@@ -3981,9 +3982,12 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
VTable, Two);
}
- if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
- VTable = CGM.getConstantSignedPointer(VTable, Schema, nullptr, GlobalDecl(),
- QualType(Ty, 0));
+ if (const auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
+ VTable = CGM.getConstantSignedPointer(
+ VTable, Schema,
+ Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
+ GlobalDecl(), QualType(Ty, 0));
Fields.push_back(VTable);
}
@@ -4099,8 +4103,18 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm::GlobalVariable::LinkageTypes Linkage,
llvm::GlobalValue::VisibilityTypes Visibility,
llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ llvm::Module &M = CGM.getModule();
+ llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
+ // int8 is an arbitrary type to be replaced later with replaceInitializer.
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
+ /*Initializer=*/nullptr, Name);
+
// Add the vtable pointer.
- BuildVTablePointer(cast<Type>(Ty));
+ BuildVTablePointer(cast<Type>(Ty), GV);
// And the name.
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
@@ -4218,16 +4232,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm_unreachable("HLSL doesn't support RTTI");
}
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
-
- SmallString<256> Name;
- llvm::raw_svector_ostream Out(Name);
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
- llvm::Module &M = CGM.getModule();
- llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(M, Init->getType(),
- /*isConstant=*/true, Linkage, Init, Name);
+ GV->replaceInitializer(llvm::ConstantStruct::getAnon(Fields));
// Export the typeinfo in the same circumstances as the vtable is exported.
auto GVDLLStorageClass = DLLStorageClass;
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 3fc3929..d032fd7a 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -3595,7 +3595,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
StringRef Value = A->getValue();
if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64() &&
!EffectiveTriple.isARM() && !EffectiveTriple.isThumb() &&
- !EffectiveTriple.isRISCV())
+ !EffectiveTriple.isRISCV() && !EffectiveTriple.isPPC())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
if ((EffectiveTriple.isX86() || EffectiveTriple.isARM() ||
@@ -3635,7 +3635,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
<< A->getOption().getName() << Value << "sysreg global";
return;
}
- if (EffectiveTriple.isRISCV()) {
+ if (EffectiveTriple.isRISCV() || EffectiveTriple.isPPC()) {
if (Value != "tls" && Value != "global") {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
<< A->getOption().getName() << Value << "tls global";
@@ -3656,7 +3656,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
StringRef Value = A->getValue();
if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64() &&
!EffectiveTriple.isARM() && !EffectiveTriple.isThumb() &&
- !EffectiveTriple.isRISCV())
+ !EffectiveTriple.isRISCV() && !EffectiveTriple.isPPC())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
int Offset;
@@ -3676,7 +3676,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_reg_EQ)) {
StringRef Value = A->getValue();
if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64() &&
- !EffectiveTriple.isRISCV())
+ !EffectiveTriple.isRISCV() && !EffectiveTriple.isPPC())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
if (EffectiveTriple.isX86() && (Value != "fs" && Value != "gs")) {
@@ -3693,6 +3693,16 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
<< A->getOption().getName() << Value << "tp";
return;
}
+ if (EffectiveTriple.isPPC64() && Value != "r13") {
+ D.Diag(diag::err_drv_invalid_value_with_suggestion)
+ << A->getOption().getName() << Value << "r13";
+ return;
+ }
+ if (EffectiveTriple.isPPC32() && Value != "r2") {
+ D.Diag(diag::err_drv_invalid_value_with_suggestion)
+ << A->getOption().getName() << Value << "r2";
+ return;
+ }
A->render(Args, CmdArgs);
}
@@ -6914,16 +6924,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);
- // -fno-strict-overflow implies -fwrapv if it isn't disabled, but
- // -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
- if (Arg *A = Args.getLastArg(options::OPT_fwrapv, options::OPT_fno_wrapv)) {
- if (A->getOption().matches(options::OPT_fwrapv))
- CmdArgs.push_back("-fwrapv");
- } else if (Arg *A = Args.getLastArg(options::OPT_fstrict_overflow,
- options::OPT_fno_strict_overflow)) {
- if (A->getOption().matches(options::OPT_fno_strict_overflow))
- CmdArgs.push_back("-fwrapv");
- }
+ // Handle -f[no-]wrapv and -f[no-]strict-overflow, which are used by both
+ // clang and flang.
+ renderCommonIntegerOverflowOptions(Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_ffinite_loops,
options::OPT_fno_finite_loops);
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index e662c3f..91605a6 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -3048,3 +3048,17 @@ bool tools::shouldRecordCommandLine(const ToolChain &TC,
return FRecordCommandLine || TC.UseDwarfDebugFlags() || GRecordCommandLine;
}
+
+void tools::renderCommonIntegerOverflowOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // -fno-strict-overflow implies -fwrapv if it isn't disabled, but
+ // -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
+ if (Arg *A = Args.getLastArg(options::OPT_fwrapv, options::OPT_fno_wrapv)) {
+ if (A->getOption().matches(options::OPT_fwrapv))
+ CmdArgs.push_back("-fwrapv");
+ } else if (Arg *A = Args.getLastArg(options::OPT_fstrict_overflow,
+ options::OPT_fno_strict_overflow)) {
+ if (A->getOption().matches(options::OPT_fno_strict_overflow))
+ CmdArgs.push_back("-fwrapv");
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 9cafac2..b6ddd99 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -262,6 +262,9 @@ bool shouldRecordCommandLine(const ToolChain &TC,
bool &FRecordCommandLine,
bool &GRecordCommandLine);
+void renderCommonIntegerOverflowOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index e2f8f6e..a9d2b7a 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -869,6 +869,8 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ renderCommonIntegerOverflowOptions(Args, CmdArgs);
+
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 1482707..c612960 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -1104,6 +1104,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("RemoveEmptyLinesInUnwrappedLines",
+ Style.RemoveEmptyLinesInUnwrappedLines);
IO.mapOptional("RemoveParentheses", Style.RemoveParentheses);
IO.mapOptional("RemoveSemicolon", Style.RemoveSemicolon);
IO.mapOptional("RequiresClausePosition", Style.RequiresClausePosition);
@@ -1582,6 +1584,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
LLVMStyle.ReflowComments = FormatStyle::RCS_Always;
LLVMStyle.RemoveBracesLLVM = false;
+ LLVMStyle.RemoveEmptyLinesInUnwrappedLines = false;
LLVMStyle.RemoveParentheses = FormatStyle::RPS_Leave;
LLVMStyle.RemoveSemicolon = false;
LLVMStyle.RequiresClausePosition = FormatStyle::RCPS_OwnLine;
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index fcefaa7..13037b6 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -5509,8 +5509,10 @@ static bool isAllmanLambdaBrace(const FormatToken &Tok) {
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
- if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
+ if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0 &&
+ (!Style.RemoveEmptyLinesInUnwrappedLines || &Right == Line.First)) {
return true;
+ }
if (Style.BreakFunctionDefinitionParameters && Line.MightBeFunctionDecl &&
Line.mightBeFunctionDefinition() && Left.MightBeFunctionDeclParen &&
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index c9625c3..bda9850 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -2504,6 +2504,11 @@ bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) {
// Assume there are no blocks inside a braced init list apart
// from the ones we explicitly parse out (like lambdas).
FormatTok->setBlockKind(BK_BracedInit);
+ if (!IsAngleBracket) {
+ auto *Prev = FormatTok->Previous;
+ if (Prev && Prev->is(tok::greater))
+ Prev->setFinalizedType(TT_TemplateCloser);
+ }
nextToken();
parseBracedList();
break;
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index f0d1634..5e9886a 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -220,7 +220,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
- CurScope(nullptr), Ident_super(nullptr),
+ StackHandler(Diags), CurScope(nullptr), Ident_super(nullptr),
AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
ARMPtr(std::make_unique<SemaARM>(*this)),
AVRPtr(std::make_unique<SemaAVR>(*this)),
@@ -562,17 +562,9 @@ Sema::~Sema() {
SemaPPCallbackHandler->reset();
}
-void Sema::warnStackExhausted(SourceLocation Loc) {
- // Only warn about this once.
- if (!WarnedStackExhausted) {
- Diag(Loc, diag::warn_stack_exhausted);
- WarnedStackExhausted = true;
- }
-}
-
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn) {
- clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
+ StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
diff --git a/clang/lib/Sema/SemaFunctionEffects.cpp b/clang/lib/Sema/SemaFunctionEffects.cpp
index 70e5d78..3fa326d 100644
--- a/clang/lib/Sema/SemaFunctionEffects.cpp
+++ b/clang/lib/Sema/SemaFunctionEffects.cpp
@@ -1540,6 +1540,7 @@ bool Sema::FunctionEffectDiff::shouldDiagnoseConversion(
// matching is better.
return true;
}
+ break;
case FunctionEffect::Kind::Blocking:
case FunctionEffect::Kind::Allocating:
return false;
@@ -1563,6 +1564,7 @@ bool Sema::FunctionEffectDiff::shouldDiagnoseRedeclaration(
// All these forms of mismatches are diagnosed.
return true;
}
+ break;
case FunctionEffect::Kind::Blocking:
case FunctionEffect::Kind::Allocating:
return false;
@@ -1592,7 +1594,7 @@ Sema::FunctionEffectDiff::shouldDiagnoseMethodOverride(
case Kind::ConditionMismatch:
return OverrideResult::Warn;
}
-
+ break;
case FunctionEffect::Kind::Blocking:
case FunctionEffect::Kind::Allocating:
return OverrideResult::NoAction;
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 1d18a63..c6627b0 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -102,8 +102,10 @@ static ResourceClass getResourceClass(RegisterType RT) {
return ResourceClass::Sampler;
case RegisterType::C:
case RegisterType::I:
- llvm_unreachable("unexpected RegisterType value");
+ // Deliberately falling through to the unreachable below.
+ break;
}
+ llvm_unreachable("unexpected RegisterType value");
}
DeclBindingInfo *ResourceBindings::addDeclBindingInfo(const VarDecl *VD,
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3da4b51..e63d605 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -50,7 +50,7 @@ struct RVVIntrinsicDef {
struct RVVOverloadIntrinsicDef {
// Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
- SmallVector<uint16_t, 8> Indexes;
+ SmallVector<uint32_t, 8> Indexes;
};
} // namespace
@@ -169,7 +169,7 @@ private:
// List of all RVV intrinsic.
std::vector<RVVIntrinsicDef> IntrinsicList;
// Mapping function name to index of IntrinsicList.
- StringMap<uint16_t> Intrinsics;
+ StringMap<uint32_t> Intrinsics;
// Mapping function name to RVVOverloadIntrinsicDef.
StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
@@ -399,7 +399,7 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
Record.HasFRMRoundModeOp);
// Put into IntrinsicList.
- uint16_t Index = IntrinsicList.size();
+ uint32_t Index = IntrinsicList.size();
assert(IntrinsicList.size() == (size_t)Index &&
"Intrinsics indices overflow.");
IntrinsicList.push_back({BuiltinName, Signature});
@@ -623,7 +623,12 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
TheCall->getType()->castAs<BuiltinType>());
- if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
+ const FunctionDecl *FD = SemaRef.getCurFunctionDecl();
+ llvm::StringMap<bool> FunctionFeatureMap;
+ Context.getFunctionFeatureMap(FunctionFeatureMap, FD);
+
+ if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v") &&
+ !FunctionFeatureMap.lookup("v"))
return Diag(TheCall->getBeginLoc(),
diag::err_riscv_builtin_requires_extension)
<< /* IsExtension */ true << TheCall->getSourceRange() << "v";
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 8665c09..457a996 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -806,8 +806,7 @@ void Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) {
// Check to see if we're low on stack space. We can't do anything about this
// from here, but we can at least warn the user.
- if (isStackNearlyExhausted())
- warnStackExhausted(Ctx.PointOfInstantiation);
+ StackHandler.warnOnStackNearlyExhausted(Ctx.PointOfInstantiation);
}
void Sema::popCodeSynthesisContext() {
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 1b2473f..1cf6c93 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -64,6 +64,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TokenKinds.h"
@@ -9648,18 +9649,15 @@ DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) const {
return Diags.Report(Loc, DiagID);
}
-void ASTReader::warnStackExhausted(SourceLocation Loc) {
+void ASTReader::runWithSufficientStackSpace(SourceLocation Loc,
+ llvm::function_ref<void()> Fn) {
// When Sema is available, avoid duplicate errors.
if (SemaObj) {
- SemaObj->warnStackExhausted(Loc);
+ SemaObj->runWithSufficientStackSpace(Loc, Fn);
return;
}
- if (WarnedStackExhausted)
- return;
- WarnedStackExhausted = true;
-
- Diag(Loc, diag::warn_stack_exhausted);
+ StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
/// Retrieve the identifier table associated with the
@@ -10509,13 +10507,14 @@ ASTReader::ASTReader(Preprocessor &PP, InMemoryModuleCache &ModuleCache,
bool AllowConfigurationMismatch, bool ValidateSystemInputs,
bool ValidateASTInputFilesContent, bool UseGlobalIndex,
std::unique_ptr<llvm::Timer> ReadTimer)
- : Listener(bool(DisableValidationKind &DisableValidationForModuleKind::PCH)
+ : Listener(bool(DisableValidationKind & DisableValidationForModuleKind::PCH)
? cast<ASTReaderListener>(new SimpleASTReaderListener(PP))
: cast<ASTReaderListener>(new PCHValidator(PP, *this))),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
- PCHContainerRdr(PCHContainerRdr), Diags(PP.getDiagnostics()), PP(PP),
- ContextObj(Context), ModuleMgr(PP.getFileManager(), ModuleCache,
- PCHContainerRdr, PP.getHeaderSearchInfo()),
+ PCHContainerRdr(PCHContainerRdr), Diags(PP.getDiagnostics()),
+ StackHandler(Diags), PP(PP), ContextObj(Context),
+ ModuleMgr(PP.getFileManager(), ModuleCache, PCHContainerRdr,
+ PP.getHeaderSearchInfo()),
DummyIdResolver(PP), ReadTimer(std::move(ReadTimer)), isysroot(isysroot),
DisableValidationKind(DisableValidationKind),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 1ccc810..d4e392d 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -4168,8 +4168,7 @@ Decl *ASTReader::ReadDeclRecord(GlobalDeclID ID) {
D->setDeclContext(Context.getTranslationUnitDecl());
// Reading some declarations can result in deep recursion.
- clang::runWithSufficientStackSpace([&] { warnStackExhausted(DeclLoc); },
- [&] { Reader.Visit(D); });
+ runWithSufficientStackSpace(DeclLoc, [&] { Reader.Visit(D); });
// If this declaration is also a declaration context, get the
// offsets for its tables of lexical and visible declarations.
diff --git a/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp
index 339927c..17f12141 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp
@@ -177,7 +177,8 @@ BugReportPtr BitwiseShiftValidator::checkOvershift() {
RightOpStr = formatv(" '{0}'", ConcreteRight->getValue());
else {
SValBuilder &SVB = Ctx.getSValBuilder();
- if (const llvm::APSInt *MinRight = SVB.getMinValue(FoldedState, Right)) {
+ if (const llvm::APSInt *MinRight = SVB.getMinValue(FoldedState, Right);
+ MinRight && *MinRight >= LHSBitWidth) {
LowerBoundStr = formatv(" >= {0},", MinRight->getExtValue());
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 70d5a60..c39fa81 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -1249,6 +1249,8 @@ public:
// calculate the effective range set by intersecting the range set
// for A - B and the negated range set of B - A.
getRangeForNegatedSymSym(SSE),
+      // If commutative, we may have constraints for the commuted variant.
+ getRangeCommutativeSymSym(SSE),
// If Sym is a comparison expression (except <=>),
// find any other comparisons with the same operands.
// See function description.
@@ -1485,6 +1487,21 @@ private:
Sym->getType());
}
+ std::optional<RangeSet> getRangeCommutativeSymSym(const SymSymExpr *SSE) {
+ auto Op = SSE->getOpcode();
+ bool IsCommutative = llvm::is_contained(
+ // ==, !=, |, &, +, *, ^
+ {BO_EQ, BO_NE, BO_Or, BO_And, BO_Add, BO_Mul, BO_Xor}, Op);
+ if (!IsCommutative)
+ return std::nullopt;
+
+ SymbolRef Commuted = State->getSymbolManager().getSymSymExpr(
+ SSE->getRHS(), Op, SSE->getLHS(), SSE->getType());
+ if (const RangeSet *Range = getConstraint(State, Commuted))
+ return *Range;
+ return std::nullopt;
+ }
+
// Returns ranges only for binary comparison operators (except <=>)
// when left and right operands are symbolic values.
// Finds any other comparisons with the same operands.
@@ -1936,30 +1953,27 @@ public:
const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
private:
- RangeSet::Factory F;
+ mutable RangeSet::Factory F;
- RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
- RangeSet getRange(ProgramStateRef State, EquivalenceClass Class);
+ RangeSet getRange(ProgramStateRef State, SymbolRef Sym) const;
ProgramStateRef setRange(ProgramStateRef State, SymbolRef Sym,
RangeSet Range);
- ProgramStateRef setRange(ProgramStateRef State, EquivalenceClass Class,
- RangeSet Range);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
+ const llvm::APSInt &Adjustment) const;
RangeSet getSymGTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
+ const llvm::APSInt &Adjustment) const;
RangeSet getSymLERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
+ const llvm::APSInt &Adjustment) const;
RangeSet getSymLERange(llvm::function_ref<RangeSet()> RS,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
+ const llvm::APSInt &Adjustment) const;
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
+ const llvm::APSInt &Adjustment) const;
};
//===----------------------------------------------------------------------===//
@@ -2866,24 +2880,19 @@ ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
const llvm::APSInt *RangeConstraintManager::getSymVal(ProgramStateRef St,
SymbolRef Sym) const {
- const RangeSet *T = getConstraint(St, Sym);
- return T ? T->getConcreteValue() : nullptr;
+ return getRange(St, Sym).getConcreteValue();
}
const llvm::APSInt *RangeConstraintManager::getSymMinVal(ProgramStateRef St,
SymbolRef Sym) const {
- const RangeSet *T = getConstraint(St, Sym);
- if (!T || T->isEmpty())
- return nullptr;
- return &T->getMinValue();
+ RangeSet Range = getRange(St, Sym);
+ return Range.isEmpty() ? nullptr : &Range.getMinValue();
}
const llvm::APSInt *RangeConstraintManager::getSymMaxVal(ProgramStateRef St,
SymbolRef Sym) const {
- const RangeSet *T = getConstraint(St, Sym);
- if (!T || T->isEmpty())
- return nullptr;
- return &T->getMaxValue();
+ RangeSet Range = getRange(St, Sym);
+ return Range.isEmpty() ? nullptr : &Range.getMaxValue();
}
//===----------------------------------------------------------------------===//
@@ -3027,7 +3036,7 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
}
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
- SymbolRef Sym) {
+ SymbolRef Sym) const {
return SymbolicRangeInferrer::inferRange(F, State, Sym);
}
@@ -3082,10 +3091,10 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
return setRange(St, Sym, New);
}
-RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
- SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet
+RangeConstraintManager::getSymLTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) const {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -3119,10 +3128,10 @@ RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
return setRange(St, Sym, New);
}
-RangeSet RangeConstraintManager::getSymGTRange(ProgramStateRef St,
- SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet
+RangeConstraintManager::getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) const {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -3156,10 +3165,10 @@ RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
return setRange(St, Sym, New);
}
-RangeSet RangeConstraintManager::getSymGERange(ProgramStateRef St,
- SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet
+RangeConstraintManager::getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) const {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -3196,7 +3205,7 @@ RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
RangeSet
RangeConstraintManager::getSymLERange(llvm::function_ref<RangeSet()> RS,
const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+ const llvm::APSInt &Adjustment) const {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -3222,10 +3231,10 @@ RangeConstraintManager::getSymLERange(llvm::function_ref<RangeSet()> RS,
return F.intersect(Default, Lower, Upper);
}
-RangeSet RangeConstraintManager::getSymLERange(ProgramStateRef St,
- SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet
+RangeConstraintManager::getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) const {
return getSymLERange([&] { return getRange(St, Sym); }, Int, Adjustment);
}
diff --git a/clang/test/AST/ByteCode/builtin-functions.cpp b/clang/test/AST/ByteCode/builtin-functions.cpp
index 450ff56..b5d3341 100644
--- a/clang/test/AST/ByteCode/builtin-functions.cpp
+++ b/clang/test/AST/ByteCode/builtin-functions.cpp
@@ -265,6 +265,20 @@ namespace fpclassify {
char classify_subnorm [__builtin_fpclassify(-1, -1, -1, +1, -1, 1.0e-38f)];
}
+namespace abs {
+ static_assert(__builtin_abs(14) == 14, "");
+ static_assert(__builtin_labs(14L) == 14L, "");
+ static_assert(__builtin_llabs(14LL) == 14LL, "");
+ static_assert(__builtin_abs(-14) == 14, "");
+ static_assert(__builtin_labs(-0x14L) == 0x14L, "");
+ static_assert(__builtin_llabs(-0x141414141414LL) == 0x141414141414LL, "");
+#define BITSIZE(x) (sizeof(x) * 8)
+ constexpr int abs4 = __builtin_abs(1 << (BITSIZE(int) - 1)); // both-error {{must be initialized by a constant expression}}
+ constexpr long abs6 = __builtin_labs(1L << (BITSIZE(long) - 1)); // both-error {{must be initialized by a constant expression}}
+ constexpr long long abs8 = __builtin_llabs(1LL << (BITSIZE(long long) - 1)); // both-error {{must be initialized by a constant expression}}
+#undef BITSIZE
+} // namespace abs
+
namespace fabs {
static_assert(__builtin_fabs(-14.0) == 14.0, "");
}
diff --git a/clang/test/AST/ByteCode/new-delete.cpp b/clang/test/AST/ByteCode/new-delete.cpp
index 8bcbed1..94fe2d4 100644
--- a/clang/test/AST/ByteCode/new-delete.cpp
+++ b/clang/test/AST/ByteCode/new-delete.cpp
@@ -796,6 +796,28 @@ static_assert(virt_delete(false)); // both-error {{not an integral constant expr
// both-note {{in call to}}
+namespace ToplevelScopeInTemplateArg {
+ class string {
+ public:
+ char *mem;
+ constexpr string() {
+ this->mem = new char(1);
+ }
+ constexpr ~string() {
+ delete this->mem;
+ }
+ constexpr unsigned size() const { return 4; }
+ };
+
+
+ template <unsigned N>
+ void test() {};
+
+ void f() {
+ test<string().size()>();
+ static_assert(string().size() == 4);
+ }
+}
#else
/// Make sure we reject this prior to C++20
diff --git a/clang/test/AST/ByteCode/placement-new.cpp b/clang/test/AST/ByteCode/placement-new.cpp
index 6bd83f2..5673b5c 100644
--- a/clang/test/AST/ByteCode/placement-new.cpp
+++ b/clang/test/AST/ByteCode/placement-new.cpp
@@ -300,3 +300,27 @@ namespace UsedToCrash {
}
int alloc1 = (alloc(), 0);
}
+
+constexpr bool change_union_member() {
+ union U {
+ int a;
+ int b;
+ };
+ U u = {.a = 1};
+ std::construct_at<int>(&u.b, 2);
+ return u.b == 2;
+}
+static_assert(change_union_member());
+
+namespace PR48606 {
+ struct A { mutable int n = 0; };
+
+ constexpr bool f() {
+ A a;
+ A *p = &a;
+ p->~A();
+ std::construct_at<A>(p);
+ return true;
+ }
+ static_assert(f());
+}
diff --git a/clang/test/Analysis/infeasible-sink.c b/clang/test/Analysis/infeasible-sink.c
index 9cb66fc..a88ca42 100644
--- a/clang/test/Analysis/infeasible-sink.c
+++ b/clang/test/Analysis/infeasible-sink.c
@@ -38,7 +38,7 @@ void test1(int x) {
}
int a, b, c, d, e;
-void test2() {
+void test2(void) {
if (a == 0)
return;
@@ -50,31 +50,10 @@ void test2() {
b = d;
a -= d;
- if (a != 0)
- return;
-
- clang_analyzer_warnIfReached(); // expected-warning{{REACHABLE}}
+ clang_analyzer_warnIfReached(); // expected-warning {{REACHABLE}}
- /* The BASELINE passes these checks ('wrning' is used to avoid lit to match)
- // The parent state is already infeasible, look at this contradiction:
- clang_analyzer_eval(b > 0); // expected-wrning{{FALSE}}
- clang_analyzer_eval(b <= 0); // expected-wrning{{FALSE}}
- // Crashes with expensive checks.
- if (b > 0) {
- clang_analyzer_warnIfReached(); // no-warning, OK
+ if (a != 0)
return;
- }
- // Should not be reachable.
- clang_analyzer_warnIfReached(); // expected-wrning{{REACHABLE}}
- */
- // The parent state is already infeasible, but we realize that only if b is
- // constrained.
- clang_analyzer_eval(b > 0); // expected-warning{{UNKNOWN}}
- clang_analyzer_eval(b <= 0); // expected-warning{{UNKNOWN}}
- if (b > 0) {
- clang_analyzer_warnIfReached(); // no-warning
- return;
- }
- clang_analyzer_warnIfReached(); // no-warning
+ clang_analyzer_warnIfReached(); // no-warning: Unreachable due to contradiction.
}
diff --git a/clang/test/Analysis/unary-sym-expr.c b/clang/test/Analysis/unary-sym-expr.c
index 7c4774f..92e11b2 100644
--- a/clang/test/Analysis/unary-sym-expr.c
+++ b/clang/test/Analysis/unary-sym-expr.c
@@ -29,12 +29,39 @@ int test(int x, int y) {
return 42;
}
-void test_svalbuilder_simplification(int x, int y) {
+void test_svalbuilder_simplification_add(int x, int y) {
if (x + y != 3)
return;
clang_analyzer_eval(-(x + y) == -3); // expected-warning{{TRUE}}
- // FIXME Commutativity is not supported yet.
- clang_analyzer_eval(-(y + x) == -3); // expected-warning{{UNKNOWN}}
+ clang_analyzer_eval(-(y + x) == -3); // expected-warning{{TRUE}}
+}
+
+void test_svalbuilder_simplification_mul(int x, int y) {
+ if (x * y != 3)
+ return;
+ clang_analyzer_eval(-(x * y) == -3); // expected-warning{{TRUE}}
+ clang_analyzer_eval(-(y * x) == -3); // expected-warning{{TRUE}}
+}
+
+void test_svalbuilder_simplification_and(int x, int y) {
+ if ((x & y) != 3)
+ return;
+ clang_analyzer_eval(-(x & y) == -3); // expected-warning{{TRUE}}
+ clang_analyzer_eval(-(y & x) == -3); // expected-warning{{TRUE}}
+}
+
+void test_svalbuilder_simplification_or(int x, int y) {
+ if ((x | y) != 3)
+ return;
+ clang_analyzer_eval(-(x | y) == -3); // expected-warning{{TRUE}}
+ clang_analyzer_eval(-(y | x) == -3); // expected-warning{{TRUE}}
+}
+
+void test_svalbuilder_simplification_xor(int x, int y) {
+ if ((x ^ y) != 3)
+ return;
+ clang_analyzer_eval(-(x ^ y) == -3); // expected-warning{{TRUE}}
+ clang_analyzer_eval(-(y ^ x) == -3); // expected-warning{{TRUE}}
}
int test_fp(int flag) {
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index fa0bf6a..75b91d3 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -3,7 +3,35 @@
// RUN: %clang_cc1 -triple riscv64 -O2 -emit-llvm %s -o - \
// RUN: | FileCheck %s
-// Test RISC-V specific inline assembly constraints.
+// Test RISC-V specific inline assembly constraints and modifiers.
+
+long test_r(long x) {
+// CHECK-LABEL: define{{.*}} {{i64|i32}} @test_r(
+// CHECK: call {{i64|i32}} asm sideeffect "", "=r,r"({{i64|i32}} %{{.*}})
+ long ret;
+ asm volatile ("" : "=r"(ret) : "r"(x));
+// CHECK: call {{i64|i32}} asm sideeffect "", "=r,r"({{i64|i32}} %{{.*}})
+ asm volatile ("" : "=r"(ret) : "r"(x));
+ return ret;
+}
+
+long test_cr(long x) {
+// CHECK-LABEL: define{{.*}} {{i64|i32}} @test_cr(
+// CHECK: call {{i64|i32}} asm sideeffect "", "=^cr,^cr"({{i64|i32}} %{{.*}})
+ long ret;
+ asm volatile ("" : "=cr"(ret) : "cr"(x));
+ return ret;
+}
+
+float cf;
+double cd;
+void test_cf(float f, double d) {
+// CHECK-LABEL: define{{.*}} void @test_cf(
+// CHECK: call float asm sideeffect "", "=^cf,^cf"(float %{{.*}})
+ asm volatile("" : "=cf"(cf) : "cf"(f));
+// CHECK: call double asm sideeffect "", "=^cf,^cf"(double %{{.*}})
+ asm volatile("" : "=cf"(cd) : "cf"(d));
+}
void test_I(void) {
// CHECK-LABEL: define{{.*}} void @test_I()
@@ -58,3 +86,13 @@ void test_s(void) {
asm("// %0 %1 %2" :: "S"(&var), "S"(&arr[1][1]), "S"(test_s));
}
+
+// CHECK-LABEL: test_modifiers(
+// CHECK: call void asm sideeffect "// ${0:i} ${1:i}", "r,r"({{i32|i64}} %val, i32 37)
+// CHECK: call void asm sideeffect "// ${0:z} ${1:z}", "i,i"(i32 0, i32 1)
+// CHECK: call void asm sideeffect "// ${0:N}", "r"({{i32|i64}} %val)
+void test_modifiers(long val) {
+ asm volatile("// %i0 %i1" :: "r"(val), "r"(37));
+ asm volatile("// %z0 %z1" :: "i"(0), "i"(1));
+ asm volatile("// %N0" :: "r"(val));
+}
diff --git a/clang/test/CodeGen/math-libcalls-tbaa-indirect-args.c b/clang/test/CodeGen/math-libcalls-tbaa-indirect-args.c
index b94f964..8e5f015 100644
--- a/clang/test/CodeGen/math-libcalls-tbaa-indirect-args.c
+++ b/clang/test/CodeGen/math-libcalls-tbaa-indirect-args.c
@@ -153,39 +153,39 @@ _Complex long double test_cargl(_Complex long double cld) {
int ilogbl(long double a);
// CHECK-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
+// CHECK-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK: [[CALL:%.*]] = tail call i32 @ilogbl(x86_fp80 noundef [[A]]) #[[ATTR5]], !tbaa [[TBAA2]]
//
// CHECK-WIN64-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-WIN64-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
+// CHECK-WIN64-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-WIN64: [[CALL:%.*]] = tail call i32 @ilogbl(x86_fp80 noundef [[A]]) #[[ATTR5]], !tbaa [[TBAA2]]
//
// CHECK-I686-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-I686-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
+// CHECK-I686-SAME: x86_fp80 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-I686: [[CALL:%.*]] = tail call i32 @ilogbl(x86_fp80 noundef [[A]]) #[[ATTR5]], !tbaa [[TBAA3]]
//
// CHECK-PPC-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-PPC-SAME: ppc_fp128 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-PPC-SAME: ppc_fp128 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-PPC: [[CALL:%.*]] = tail call i32 @ilogbl(ppc_fp128 noundef [[A]]) #[[ATTR3]], !tbaa [[TBAA2]]
//
// CHECK-ARM-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-ARM-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-ARM-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-ARM: [[CALL:%.*]] = tail call i32 @ilogbl(double noundef [[A]]) #[[ATTR2]], !tbaa [[TBAA3]]
//
// CHECK-ARM-HF-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-ARM-HF-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-ARM-HF-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-ARM-HF: [[CALL:%.*]] = tail call i32 @ilogbl(double noundef [[A]]) #[[ATTR2]], !tbaa [[TBAA3]]
//
// CHECK-THUMB-LABEL: define i32 @test_ilogb(
-// CHECK-THUMB-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-THUMB-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-THUMB: [[CALL:%.*]] = tail call i32 @ilogbl(double noundef [[A]]) #[[ATTR2]], !tbaa [[TBAA3]]
//
// CHECK-AARCH-LABEL: define dso_local i32 @test_ilogb(
-// CHECK-AARCH-SAME: fp128 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-AARCH-SAME: fp128 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-AARCH: [[CALL:%.*]] = tail call i32 @ilogbl(fp128 noundef [[A]]) #[[ATTR2]], !tbaa [[TBAA2]]
//
// CHECK-SPIR-LABEL: define dso_local spir_func i32 @test_ilogb(
-// CHECK-SPIR-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-SPIR-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-SPIR: [[CALL:%.*]] = tail call spir_func i32 @ilogbl(double noundef [[A]]) #[[ATTR3]], !tbaa [[TBAA2]]
//
// CHECK-MINGW32-LABEL: define dso_local i32 @test_ilogb(
diff --git a/clang/test/CodeGen/stack-protector-guard.c b/clang/test/CodeGen/stack-protector-guard.c
index 4777367..82616ae 100644
--- a/clang/test/CodeGen/stack-protector-guard.c
+++ b/clang/test/CodeGen/stack-protector-guard.c
@@ -12,6 +12,12 @@
// RUN: %clang_cc1 -mstack-protector-guard=tls -triple riscv64-unknown-elf \
// RUN: -mstack-protector-guard-offset=44 -mstack-protector-guard-reg=tp \
// RUN: -emit-llvm %s -o - | FileCheck %s --check-prefix=RISCV
+// RUN: %clang_cc1 -mstack-protector-guard=tls -triple powerpc64-unknown-elf \
+// RUN: -mstack-protector-guard-offset=52 -mstack-protector-guard-reg=r13 \
+// RUN: -emit-llvm %s -o - | FileCheck %s --check-prefix=POWERPC64
+// RUN: %clang_cc1 -mstack-protector-guard=tls -triple ppc32-unknown-elf \
+// RUN: -mstack-protector-guard-offset=16 -mstack-protector-guard-reg=r2 \
+// RUN: -emit-llvm %s -o - | FileCheck %s --check-prefix=POWERPC32
void foo(int*);
void bar(int x) {
int baz[x];
@@ -31,3 +37,13 @@ void bar(int x) {
// RISCV: [[ATTR1]] = !{i32 1, !"stack-protector-guard", !"tls"}
// RISCV: [[ATTR2]] = !{i32 1, !"stack-protector-guard-reg", !"tp"}
// RISCV: [[ATTR3]] = !{i32 1, !"stack-protector-guard-offset", i32 44}
+
+// POWERPC64: !llvm.module.flags = !{{{.*}}[[ATTR1:![0-9]+]], [[ATTR2:![0-9]+]], [[ATTR3:![0-9]+]], [[ATTR4:![0-9]+]]}
+// POWERPC64: [[ATTR2]] = !{i32 1, !"stack-protector-guard", !"tls"}
+// POWERPC64: [[ATTR3]] = !{i32 1, !"stack-protector-guard-reg", !"r13"}
+// POWERPC64: [[ATTR4]] = !{i32 1, !"stack-protector-guard-offset", i32 52}
+
+// POWERPC32: !llvm.module.flags = !{{{.*}}[[ATTR1:![0-9]+]], [[ATTR2:![0-9]+]], [[ATTR3:![0-9]+]], [[ATTR4:![0-9]+]]}
+// POWERPC32: [[ATTR2]] = !{i32 1, !"stack-protector-guard", !"tls"}
+// POWERPC32: [[ATTR3]] = !{i32 1, !"stack-protector-guard-reg", !"r2"}
+// POWERPC32: [[ATTR4]] = !{i32 1, !"stack-protector-guard-offset", i32 16}
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
index bb86d45..e6a9456 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/child-inheritted-from-parent-in-comdat.cpp
@@ -4,8 +4,8 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -O1 -o - -emit-llvm -fhalf-no-semantic-interposition | FileCheck %s
// The inline function is emitted in each module with the same comdat
-// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1A = comdat any
+// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1B.rtti_proxy = comdat any
// The VTable is emitted everywhere used
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/inlined-key-function.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/inlined-key-function.cpp
index d5d9a85..70f8289 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/inlined-key-function.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/inlined-key-function.cpp
@@ -4,8 +4,8 @@
// RUN: %clang_cc1 %s -triple=aarch64-unknown-fuchsia -O1 -o - -emit-llvm | FileCheck %s
// CHECK: $_ZTV1A = comdat any
-// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1A = comdat any
+// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// The VTable is linkonce_odr and in a comdat here bc it’s key function is inline defined.
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/parent-and-child-in-comdats.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/parent-and-child-in-comdats.cpp
index a033ac4..c1b9a93 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/parent-and-child-in-comdats.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/parent-and-child-in-comdats.cpp
@@ -8,12 +8,12 @@
// CHECK: $_ZN1A3fooEv = comdat any
// CHECK: $_ZN1B3fooEv = comdat any
// CHECK: $_ZTV1A = comdat any
-// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1A = comdat any
+// CHECK: $_ZTS1A = comdat any
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// CHECK: $_ZTV1B = comdat any
-// CHECK: $_ZTS1B = comdat any
// CHECK: $_ZTI1B = comdat any
+// CHECK: $_ZTS1B = comdat any
// CHECK: $_ZTI1B.rtti_proxy = comdat any
// Both the vtables for A and B are emitted and in their own comdats.
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/parent-vtable-in-comdat.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/parent-vtable-in-comdat.cpp
index 341c531..d6eda79 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/parent-vtable-in-comdat.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/parent-vtable-in-comdat.cpp
@@ -7,17 +7,17 @@
// A::foo() has a comdat since it is an inline function
// CHECK: $_ZN1A3fooEv = comdat any
// CHECK: $_ZTV1A = comdat any
+// CHECK: $_ZTI1A = comdat any
// CHECK: $_ZTS1A = comdat any
// The VTable for A has its own comdat section bc it has no key function
-// CHECK: $_ZTI1A = comdat any
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// The VTable for A is emitted here and in a comdat section since it has no key function, and is used in this module when creating an instance of A.
// CHECK: @_ZTV1A.local = linkonce_odr hidden unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, comdat($_ZTV1A), align 4
+// CHECK: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, comdat, align 8
// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00", comdat, align 1
-// CHECK: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, comdat, align 8
// CHECK: @_ZTI1A.rtti_proxy = linkonce_odr hidden unnamed_addr constant ptr @_ZTI1A, comdat
// CHECK: @_ZTV1A = linkonce_odr unnamed_addr alias { [3 x i32] }, ptr @_ZTV1A.local
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
index ad8018e..9dcb1c3 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/simple-vtable-definition.cpp
@@ -9,9 +9,9 @@
// The vtable definition itself is private so we can take relative references to
// it. The vtable symbol will be exposed through a public alias.
// CHECK: @_ZTV1A.local = internal unnamed_addr constant { [3 x i32] } { [3 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr @_ZTI1A.rtti_proxy to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32)] }, align 4
+// CHECK: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, align 8
// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
-// CHECK: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, align 8
// The rtti should be in a comdat
// CHECK: @_ZTI1A.rtti_proxy = {{.*}}comdat
diff --git a/clang/test/CodeGenCXX/RelativeVTablesABI/type-info.cpp b/clang/test/CodeGenCXX/RelativeVTablesABI/type-info.cpp
index fc5ee50..c471e5d 100644
--- a/clang/test/CodeGenCXX/RelativeVTablesABI/type-info.cpp
+++ b/clang/test/CodeGenCXX/RelativeVTablesABI/type-info.cpp
@@ -5,12 +5,12 @@
// CHECK: $_ZTI1A.rtti_proxy = comdat any
// CHECK: $_ZTI1B.rtti_proxy = comdat any
+// CHECK: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, align 8
// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
-// CHECK: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 8), ptr @_ZTS1A }, align 8
+// CHECK: @_ZTI1B ={{.*}} constant { ptr, ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 8), ptr @_ZTS1B, ptr @_ZTI1A }, align 8
// CHECK: @_ZTVN10__cxxabiv120__si_class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1B ={{.*}} constant [3 x i8] c"1B\00", align 1
-// CHECK: @_ZTI1B ={{.*}} constant { ptr, ptr, ptr } { ptr getelementptr inbounds (i8, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 8), ptr @_ZTS1B, ptr @_ZTI1A }, align 8
// CHECK: @_ZTI1A.rtti_proxy = linkonce_odr hidden unnamed_addr constant ptr @_ZTI1A, comdat
// CHECK: @_ZTI1B.rtti_proxy = linkonce_odr hidden unnamed_addr constant ptr @_ZTI1B, comdat
diff --git a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
index 3f2b062..9f481e1 100644
--- a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
+++ b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
@@ -59,6 +59,9 @@ void f(__clang_svbfloat16x3_t, __clang_svbfloat16x3_t);
void f(__clang_svbfloat16x4_t, __clang_svbfloat16x4_t);
void f(__clang_svboolx2_t, __clang_svboolx2_t);
void f(__clang_svboolx4_t, __clang_svboolx4_t);
+void f(__clang_svmfloat8x2_t, __clang_svmfloat8x2_t);
+void f(__clang_svmfloat8x3_t, __clang_svmfloat8x3_t);
+void f(__clang_svmfloat8x4_t, __clang_svmfloat8x4_t);
// CHECK-LABEL: define dso_local void @_Z3foov(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
@@ -139,6 +142,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
// CHECK-NEXT: [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// CHECK-NEXT: [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// CHECK-NEXT: [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// CHECK-NEXT: [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
// CHECK-NEXT: call void @_Z1fu10__SVInt8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -151,7 +160,7 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
// CHECK-NEXT: call void @_Z1fu13__SVFloat16_tS_(<vscale x 8 x half> zeroinitializer, <vscale x 8 x half> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu13__SVFloat32_tS_(<vscale x 4 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu13__SVFloat64_tS_(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> zeroinitializer)
-// CHECK-NEXT: call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu14__SVBfloat16_tS_(<vscale x 8 x bfloat> zeroinitializer, <vscale x 8 x bfloat> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu10__SVBool_tS_(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> zeroinitializer)
// CHECK-NEXT: call void @_Z1fu11__SVCount_tS_(target("aarch64.svcount") zeroinitializer, target("aarch64.svcount") zeroinitializer)
@@ -573,6 +582,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
// CHECK-NEXT: [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
// CHECK-NEXT: [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
// CHECK-NEXT: call void @_Z1f10svboolx4_tS_(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// CHECK-NEXT: [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// CHECK-NEXT: [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// CHECK-NEXT: [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// CHECK-NEXT: [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// CHECK-NEXT: [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// CHECK-NEXT: [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// CHECK-NEXT: call void @_Z1f13svmfloat8x2_tS_(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// CHECK-NEXT: [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// CHECK-NEXT: [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// CHECK-NEXT: [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// CHECK-NEXT: [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// CHECK-NEXT: [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// CHECK-NEXT: [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// CHECK-NEXT: [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// CHECK-NEXT: [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// CHECK-NEXT: call void @_Z1f13svmfloat8x3_tS_(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// CHECK-NEXT: [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// CHECK-NEXT: [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// CHECK-NEXT: [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// CHECK-NEXT: [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// CHECK-NEXT: [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// CHECK-NEXT: [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// CHECK-NEXT: [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// CHECK-NEXT: [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// CHECK-NEXT: [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// CHECK-NEXT: [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// CHECK-NEXT: call void @_Z1f13svmfloat8x4_tS_(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
// CHECK-NEXT: ret void
//
// COMPAT_17-LABEL: define dso_local void @_Z3foov(
@@ -654,6 +696,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
// COMPAT_17-NEXT: [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// COMPAT_17-NEXT: [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// COMPAT_17-NEXT: [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// COMPAT_17-NEXT: [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
// COMPAT_17-NEXT: call void @_Z1fu10__SVInt8_tu10__SVInt8_t(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
// COMPAT_17-NEXT: call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
// COMPAT_17-NEXT: call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -1088,6 +1136,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
// COMPAT_17-NEXT: [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
// COMPAT_17-NEXT: [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
// COMPAT_17-NEXT: call void @_Z1f10svboolx4_t10svboolx4_t(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT: [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT: [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT: [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT: [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x2_t13svmfloat8x2_t(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT: [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT: [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x3_t13svmfloat8x3_t(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT: [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT: [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x4_t13svmfloat8x4_t(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
// COMPAT_17-NEXT: ret void
//
void foo() {
@@ -1146,4 +1227,7 @@ void foo() {
f(__clang_svbfloat16x4_t(), __clang_svbfloat16x4_t());
f(__clang_svboolx2_t(), __clang_svboolx2_t());
f(__clang_svboolx4_t(), __clang_svboolx4_t());
+ f(__clang_svmfloat8x2_t(), __clang_svmfloat8x2_t());
+ f(__clang_svmfloat8x3_t(), __clang_svmfloat8x3_t());
+ f(__clang_svmfloat8x4_t(), __clang_svmfloat8x4_t());
}
diff --git a/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
index 45cf808..f906836 100644
--- a/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
+++ b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
@@ -57,6 +57,9 @@
// CHECK-NEXT: [[B8X2:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// CHECK-NEXT: [[B8X4:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
// CHECK-NEXT: [[CNT:%.*]] = alloca target("aarch64.svcount"), align 2
+// CHECK-NEXT: [[MF8X2:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[MF8X3:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[MF8X4:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
// CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[S8]], align 16
// CHECK-NEXT: store <vscale x 8 x i16> zeroinitializer, ptr [[S16]], align 16
// CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[S32]], align 16
@@ -110,6 +113,9 @@
// CHECK-NEXT: store { <vscale x 16 x i1>, <vscale x 16 x i1> } zeroinitializer, ptr [[B8X2]], align 2
// CHECK-NEXT: store { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } zeroinitializer, ptr [[B8X4]], align 2
// CHECK-NEXT: store target("aarch64.svcount") zeroinitializer, ptr [[CNT]], align 2
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[MF8X2]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[MF8X3]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[MF8X4]], align 16
// CHECK-NEXT: ret void
//
void test_locals(void) {
@@ -171,6 +177,10 @@ void test_locals(void) {
__clang_svboolx4_t b8x4{};
__SVCount_t cnt{};
+
+ __clang_svmfloat8x2_t mf8x2{};
+ __clang_svmfloat8x3_t mf8x3{};
+ __clang_svmfloat8x4_t mf8x4{};
}
// CHECK-LABEL: define dso_local void @_Z12test_copy_s8u10__SVInt8_t
@@ -1142,3 +1152,63 @@ void test_copy_b8x4(__clang_svboolx4_t a) {
void test_copy_cnt(__SVCount_t a) {
__SVCount_t b{a};
}
+
+// CHECK-LABEL: define dso_local void @_Z15test_copy_mf8x213svmfloat8x2_t
+// CHECK-SAME: (<vscale x 16 x i8> [[A_COERCE0:%.*]], <vscale x 16 x i8> [[A_COERCE1:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[B:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[A_COERCE0]], 0
+// CHECK-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[A_COERCE1]], 1
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], ptr [[A]], align 16
+// CHECK-NEXT: [[A1:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } [[A1]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP2:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A_ADDR]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], ptr [[B]], align 16
+// CHECK-NEXT: ret void
+//
+void test_copy_mf8x2(__clang_svmfloat8x2_t a) {
+ __clang_svmfloat8x2_t b{a};
+}
+
+// CHECK-LABEL: define dso_local void @_Z15test_copy_mf8x313svmfloat8x3_t
+// CHECK-SAME: (<vscale x 16 x i8> [[A_COERCE0:%.*]], <vscale x 16 x i8> [[A_COERCE1:%.*]], <vscale x 16 x i8> [[A_COERCE2:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[B:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[A_COERCE0]], 0
+// CHECK-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[A_COERCE1]], 1
+// CHECK-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[A_COERCE2]], 2
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], ptr [[A]], align 16
+// CHECK-NEXT: [[A1:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[A1]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP3:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A_ADDR]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], ptr [[B]], align 16
+// CHECK-NEXT: ret void
+//
+void test_copy_mf8x3(__clang_svmfloat8x3_t a) {
+ __clang_svmfloat8x3_t b{a};
+}
+
+// CHECK-LABEL: define dso_local void @_Z15test_copy_mf8x413svmfloat8x4_t
+// CHECK-SAME: (<vscale x 16 x i8> [[A_COERCE0:%.*]], <vscale x 16 x i8> [[A_COERCE1:%.*]], <vscale x 16 x i8> [[A_COERCE2:%.*]], <vscale x 16 x i8> [[A_COERCE3:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[B:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[A_COERCE0]], 0
+// CHECK-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[A_COERCE1]], 1
+// CHECK-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[A_COERCE2]], 2
+// CHECK-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[A_COERCE3]], 3
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], ptr [[A]], align 16
+// CHECK-NEXT: [[A1:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[A1]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[A_ADDR]], align 16
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP4]], ptr [[B]], align 16
+// CHECK-NEXT: ret void
+//
+void test_copy_mf8x4(__clang_svmfloat8x4_t a) {
+ __clang_svmfloat8x4_t b{a};
+}
diff --git a/clang/test/CodeGenCXX/armv7k.cpp b/clang/test/CodeGenCXX/armv7k.cpp
index a4a243c..7aa9fd7 100644
--- a/clang/test/CodeGenCXX/armv7k.cpp
+++ b/clang/test/CodeGenCXX/armv7k.cpp
@@ -50,17 +50,17 @@ namespace test2 {
struct __attribute__((visibility("hidden"))) B {};
const std::type_info &b0 = typeid(B);
- // CHECK-GLOBALS: @_ZTSN5test21BE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIN5test21BE = linkonce_odr hidden constant { {{.*}}, ptr @_ZTSN5test21BE }
+ // CHECK-GLOBALS: @_ZTSN5test21BE = linkonce_odr hidden constant
const std::type_info &b1 = typeid(B*);
- // CHECK-GLOBALS: @_ZTSPN5test21BE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIPN5test21BE = linkonce_odr hidden constant { {{.*}}, ptr @_ZTSPN5test21BE, i32 0, ptr @_ZTIN5test21BE
+ // CHECK-GLOBALS: @_ZTSPN5test21BE = linkonce_odr hidden constant
struct C {};
const std::type_info &c0 = typeid(C);
- // CHECK-GLOBALS: @_ZTSN5test21CE = linkonce_odr constant [11 x i8] c"N5test21CE\00"
// CHECK-GLOBALS: @_ZTIN5test21CE = linkonce_odr constant { {{.*}}, ptr @_ZTSN5test21CE }
+ // CHECK-GLOBALS: @_ZTSN5test21CE = linkonce_odr constant [11 x i8] c"N5test21CE\00"
}
// va_list should be based on "char *" rather than "ptr".
diff --git a/clang/test/CodeGenCXX/builtins.cpp b/clang/test/CodeGenCXX/builtins.cpp
index 90265186..37f9491 100644
--- a/clang/test/CodeGenCXX/builtins.cpp
+++ b/clang/test/CodeGenCXX/builtins.cpp
@@ -14,6 +14,12 @@ int o = X::__builtin_fabs(-2.0);
long p = X::__builtin_fabsf(-3.0f);
// CHECK: @p ={{.*}} global i64 3, align 8
+int x = __builtin_abs(-2);
+// CHECK: @x ={{.*}} global i32 2, align 4
+
+long y = __builtin_abs(-2l);
+// CHECK: @y ={{.*}} global i64 2, align 8
+
// PR8839
extern "C" char memmove();
@@ -52,14 +58,6 @@ extern "C" int __builtin_abs(int); // #1
long __builtin_abs(long); // #2
extern "C" int __builtin_abs(int); // #3
-int x = __builtin_abs(-2);
-// CHECK: [[X:%.+]] = call i32 @llvm.abs.i32(i32 -2, i1 true)
-// CHECK-NEXT: store i32 [[X]], ptr @x, align 4
-
-long y = __builtin_abs(-2l);
-// CHECK: [[Y:%.+]] = call noundef i64 @_Z13__builtin_absl(i64 noundef -2)
-// CHECK: store i64 [[Y]], ptr @y, align 8
-
extern const char char_memchr_arg[32];
char *memchr_result = __builtin_char_memchr(char_memchr_arg, 123, 32);
// CHECK: call ptr @memchr(ptr noundef @char_memchr_arg, i32 noundef 123, i64 noundef 32)
diff --git a/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp b/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
index d0c87d9..271d9ed 100644
--- a/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
+++ b/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
@@ -10,17 +10,17 @@ B fail;
// CHECK: @_ZTV1B = linkonce_odr unnamed_addr addrspace(1) constant { [3 x ptr addrspace(1)] } { [3 x ptr addrspace(1)] [ptr addrspace(1) null, ptr addrspace(1) @_ZTI1B, ptr addrspace(1) addrspacecast (ptr @_ZN1A1fEv to ptr addrspace(1))] }, comdat, align 8
// CHECK: @fail = addrspace(1) global { ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2) }, align 8
// CHECK: @_ZTI1A = external addrspace(1) constant ptr addrspace(1)
+// CHECK: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
// CHECK: @_ZTVN10__cxxabiv120__si_class_type_infoE = external addrspace(1) global [0 x ptr addrspace(1)]
// CHECK: @_ZTS1B = linkonce_odr addrspace(1) constant [3 x i8] c"1B\00", comdat, align 1
-// CHECK: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
// CHECK: @__oclc_ABI_version = weak_odr hidden local_unnamed_addr addrspace(4) constant i32 500
//.
// WITH-NONZERO-DEFAULT-AS: @_ZTV1B = linkonce_odr unnamed_addr addrspace(1) constant { [3 x ptr addrspace(1)] } { [3 x ptr addrspace(1)] [ptr addrspace(1) null, ptr addrspace(1) @_ZTI1B, ptr addrspace(1) addrspacecast (ptr addrspace(4) @_ZN1A1fEv to ptr addrspace(1))] }, comdat, align 8
// WITH-NONZERO-DEFAULT-AS: @fail = addrspace(1) global { ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2) }, align 8
// WITH-NONZERO-DEFAULT-AS: @_ZTI1A = external addrspace(1) constant ptr addrspace(1)
+// WITH-NONZERO-DEFAULT-AS: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
// WITH-NONZERO-DEFAULT-AS: @_ZTVN10__cxxabiv120__si_class_type_infoE = external addrspace(1) global [0 x ptr addrspace(1)]
// WITH-NONZERO-DEFAULT-AS: @_ZTS1B = linkonce_odr addrspace(1) constant [3 x i8] c"1B\00", comdat, align 1
-// WITH-NONZERO-DEFAULT-AS: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
//.
// CHECK-LABEL: define dso_local noundef nonnull align 8 dereferenceable(8) ptr @_Z1fP1A(
// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
diff --git a/clang/test/CodeGenCXX/exceptions-no-rtti.cpp b/clang/test/CodeGenCXX/exceptions-no-rtti.cpp
index 7c73285..a3d9696 100644
--- a/clang/test/CodeGenCXX/exceptions-no-rtti.cpp
+++ b/clang/test/CodeGenCXX/exceptions-no-rtti.cpp
@@ -3,8 +3,8 @@
// CHECK: @_ZTIN5test11AE = linkonce_odr constant
// CHECK: @_ZTIN5test11BE = linkonce_odr constant
// CHECK: @_ZTIN5test11CE = linkonce_odr constant
-// CHECK: @_ZTIN5test11DE = linkonce_odr constant
// CHECK: @_ZTIPN5test11DE = linkonce_odr constant {{.*}} @_ZTIN5test11DE
+// CHECK: @_ZTIN5test11DE = linkonce_odr constant
// PR6974: this shouldn't crash
namespace test0 {
diff --git a/clang/test/CodeGenCXX/implicit-record-visibility.cpp b/clang/test/CodeGenCXX/implicit-record-visibility.cpp
index ef388c7..84ad822 100644
--- a/clang/test/CodeGenCXX/implicit-record-visibility.cpp
+++ b/clang/test/CodeGenCXX/implicit-record-visibility.cpp
@@ -7,6 +7,6 @@
// under -fvisibility=hidden the type of function f, due to its va_list (aka
// __builtin_va_list, aka __va_list_tag (*)[1]) parameter would be hidden:
-// CHECK: @_ZTSFvP13__va_list_tagE = linkonce_odr constant
// CHECK: @_ZTIFvP13__va_list_tagE = linkonce_odr constant
+// CHECK: @_ZTSFvP13__va_list_tagE = linkonce_odr constant
void f(va_list) { (void)typeid(f); }
diff --git a/clang/test/CodeGenCXX/mdefault-visibility-export-mapping-rtti.cpp b/clang/test/CodeGenCXX/mdefault-visibility-export-mapping-rtti.cpp
index 1af105e..2fc0a6a 100644
--- a/clang/test/CodeGenCXX/mdefault-visibility-export-mapping-rtti.cpp
+++ b/clang/test/CodeGenCXX/mdefault-visibility-export-mapping-rtti.cpp
@@ -16,20 +16,20 @@
// C is an incomplete class type, so any direct or indirect pointer types should have
// internal linkage, as should the type info for C itself.
struct C;
+// CHECK: @_ZTIP1C = internal constant
// CHECK: @_ZTSP1C = internal constant
-// CHECK: @_ZTS1C = internal constant
// CHECK: @_ZTI1C = internal constant
-// CHECK: @_ZTIP1C = internal constant
-// CHECK: @_ZTSPP1C = internal constant
+// CHECK: @_ZTS1C = internal constant
// CHECK: @_ZTIPP1C = internal constant
+// CHECK: @_ZTSPP1C = internal constant
struct __attribute__((type_visibility("default"))) D;
+// CHECK: @_ZTIP1D = internal constant
// CHECK: @_ZTSP1D = internal constant
-// CHECK: @_ZTS1D = internal constant
// CHECK: @_ZTI1D = internal constant
-// CHECK: @_ZTIP1D = internal constant
-// CHECK: @_ZTSPP1D = internal constant
+// CHECK: @_ZTS1D = internal constant
// CHECK: @_ZTIPP1D = internal constant
+// CHECK: @_ZTSPP1D = internal constant
void __attribute__((visibility("default"))) tfunc() {
(void)typeid(C *);
@@ -46,12 +46,12 @@ void s::foo() {}
// UNSPECIFIED-DEF: @_ZTV1s = unnamed_addr constant
// UNSPECIFIED-HID: @_ZTV1s = hidden unnamed_addr constant
// UNSPECIFIED-EXP: @_ZTV1s = dllexport unnamed_addr constant
-// UNSPECIFIED-DEF: @_ZTS1s = constant
-// UNSPECIFIED-HID: @_ZTS1s = hidden constant
-// UNSPECIFIED-EXP: @_ZTS1s = dllexport constant
// UNSPECIFIED-DEF: @_ZTI1s = constant
// UNSPECIFIED-HID: @_ZTI1s = hidden constant
// UNSPECIFIED-EXP: @_ZTI1s = dllexport constant
+// UNSPECIFIED-DEF: @_ZTS1s = constant
+// UNSPECIFIED-HID: @_ZTS1s = hidden constant
+// UNSPECIFIED-EXP: @_ZTS1s = dllexport constant
// explicit default visibility RTTI & vtable
struct __attribute__((type_visibility("default"))) t {
@@ -61,12 +61,12 @@ void t::foo() {}
// EXPLICIT-DEF: @_ZTV1t = unnamed_addr constant
// EXPLICIT-HID: @_ZTV1t = hidden unnamed_addr constant
// EXPLICIT-EXP: @_ZTV1t = dllexport unnamed_addr constant
-// EXPLICIT-DEF: @_ZTS1t = constant
-// EXPLICIT-HID: @_ZTS1t = hidden constant
-// EXPLICIT-EXP: @_ZTS1t = dllexport constant
// EXPLICIT-DEF: @_ZTI1t = constant
// EXPLICIT-HID: @_ZTI1t = hidden constant
// EXPLICIT-EXP: @_ZTI1t = dllexport constant
+// EXPLICIT-DEF: @_ZTS1t = constant
+// EXPLICIT-HID: @_ZTS1t = hidden constant
+// EXPLICIT-EXP: @_ZTS1t = dllexport constant
#ifdef FUNDAMENTAL_IS_EXPLICIT
#define TYPE_VIS __attribute__((type_visibility("default")))
@@ -86,511 +86,511 @@ __fundamental_type_info::~__fundamental_type_info() {}
// __cxxabiv1::__fundamental_type_info
// FUND-DEF: @_ZTVN10__cxxabiv123__fundamental_type_infoE = unnamed_addr constant
-// FUND-DEF: @_ZTSN10__cxxabiv123__fundamental_type_infoE = constant
// FUND-DEF: @_ZTIN10__cxxabiv123__fundamental_type_infoE = constant
+// FUND-DEF: @_ZTSN10__cxxabiv123__fundamental_type_infoE = constant
// FUND-HID: @_ZTVN10__cxxabiv123__fundamental_type_infoE = hidden unnamed_addr constant
-// FUND-HID: @_ZTSN10__cxxabiv123__fundamental_type_infoE = hidden constant
// FUND-HID: @_ZTIN10__cxxabiv123__fundamental_type_infoE = hidden constant
+// FUND-HID: @_ZTSN10__cxxabiv123__fundamental_type_infoE = hidden constant
// FUND-EXP: @_ZTVN10__cxxabiv123__fundamental_type_infoE = dllexport unnamed_addr constant
-// FUND-EXP: @_ZTSN10__cxxabiv123__fundamental_type_infoE = dllexport constant
// FUND-EXP: @_ZTIN10__cxxabiv123__fundamental_type_infoE = dllexport constant
+// FUND-EXP: @_ZTSN10__cxxabiv123__fundamental_type_infoE = dllexport constant
// void
-// FUND-DEF: @_ZTSv = constant
// FUND-DEF: @_ZTIv = constant
-// FUND-DEF: @_ZTSPv = constant
+// FUND-DEF: @_ZTSv = constant
// FUND-DEF: @_ZTIPv = constant
-// FUND-DEF: @_ZTSPKv = constant
+// FUND-DEF: @_ZTSPv = constant
// FUND-DEF: @_ZTIPKv = constant
-// FUND-HID: @_ZTSv = hidden constant
+// FUND-DEF: @_ZTSPKv = constant
// FUND-HID: @_ZTIv = hidden constant
-// FUND-HID: @_ZTSPv = hidden constant
+// FUND-HID: @_ZTSv = hidden constant
// FUND-HID: @_ZTIPv = hidden constant
-// FUND-HID: @_ZTSPKv = hidden constant
+// FUND-HID: @_ZTSPv = hidden constant
// FUND-HID: @_ZTIPKv = hidden constant
-// FUND-EXP: @_ZTSv = dllexport constant
+// FUND-HID: @_ZTSPKv = hidden constant
// FUND-EXP: @_ZTIv = dllexport constant
-// FUND-EXP: @_ZTSPv = dllexport constant
+// FUND-EXP: @_ZTSv = dllexport constant
// FUND-EXP: @_ZTIPv = dllexport constant
-// FUND-EXP: @_ZTSPKv = dllexport constant
+// FUND-EXP: @_ZTSPv = dllexport constant
// FUND-EXP: @_ZTIPKv = dllexport constant
+// FUND-EXP: @_ZTSPKv = dllexport constant
// std::nullptr_t
-// FUND-DEF: @_ZTSDn = constant
// FUND-DEF: @_ZTIDn = constant
-// FUND-DEF: @_ZTSPDn = constant
+// FUND-DEF: @_ZTSDn = constant
// FUND-DEF: @_ZTIPDn = constant
-// FUND-DEF: @_ZTSPKDn = constant
+// FUND-DEF: @_ZTSPDn = constant
// FUND-DEF: @_ZTIPKDn = constant
-// FUND-HID: @_ZTSDn = hidden constant
+// FUND-DEF: @_ZTSPKDn = constant
// FUND-HID: @_ZTIDn = hidden constant
-// FUND-HID: @_ZTSPDn = hidden constant
+// FUND-HID: @_ZTSDn = hidden constant
// FUND-HID: @_ZTIPDn = hidden constant
-// FUND-HID: @_ZTSPKDn = hidden constant
+// FUND-HID: @_ZTSPDn = hidden constant
// FUND-HID: @_ZTIPKDn = hidden constant
-// FUND-EXP: @_ZTSDn = dllexport constant
+// FUND-HID: @_ZTSPKDn = hidden constant
// FUND-EXP: @_ZTIDn = dllexport constant
-// FUND-EXP: @_ZTSPDn = dllexport constant
+// FUND-EXP: @_ZTSDn = dllexport constant
// FUND-EXP: @_ZTIPDn = dllexport constant
-// FUND-EXP: @_ZTSPKDn = dllexport constant
+// FUND-EXP: @_ZTSPDn = dllexport constant
// FUND-EXP: @_ZTIPKDn = dllexport constant
+// FUND-EXP: @_ZTSPKDn = dllexport constant
// bool
-// FUND-DEF: @_ZTSb = constant
// FUND-DEF: @_ZTIb = constant
-// FUND-DEF: @_ZTSPb = constant
+// FUND-DEF: @_ZTSb = constant
// FUND-DEF: @_ZTIPb = constant
-// FUND-DEF: @_ZTSPKb = constant
+// FUND-DEF: @_ZTSPb = constant
// FUND-DEF: @_ZTIPKb = constant
-// FUND-HID: @_ZTSb = hidden constant
+// FUND-DEF: @_ZTSPKb = constant
// FUND-HID: @_ZTIb = hidden constant
-// FUND-HID: @_ZTSPb = hidden constant
+// FUND-HID: @_ZTSb = hidden constant
// FUND-HID: @_ZTIPb = hidden constant
-// FUND-HID: @_ZTSPKb = hidden constant
+// FUND-HID: @_ZTSPb = hidden constant
// FUND-HID: @_ZTIPKb = hidden constant
-// FUND-EXP: @_ZTSb = dllexport constant
+// FUND-HID: @_ZTSPKb = hidden constant
// FUND-EXP: @_ZTIb = dllexport constant
-// FUND-EXP: @_ZTSPb = dllexport constant
+// FUND-EXP: @_ZTSb = dllexport constant
// FUND-EXP: @_ZTIPb = dllexport constant
-// FUND-EXP: @_ZTSPKb = dllexport constant
+// FUND-EXP: @_ZTSPb = dllexport constant
// FUND-EXP: @_ZTIPKb = dllexport constant
+// FUND-EXP: @_ZTSPKb = dllexport constant
// wchar_t
-// FUND-DEF: @_ZTSw = constant
// FUND-DEF: @_ZTIw = constant
-// FUND-DEF: @_ZTSPw = constant
+// FUND-DEF: @_ZTSw = constant
// FUND-DEF: @_ZTIPw = constant
-// FUND-DEF: @_ZTSPKw = constant
+// FUND-DEF: @_ZTSPw = constant
// FUND-DEF: @_ZTIPKw = constant
-// FUND-HID: @_ZTSw = hidden constant
+// FUND-DEF: @_ZTSPKw = constant
// FUND-HID: @_ZTIw = hidden constant
-// FUND-HID: @_ZTSPw = hidden constant
+// FUND-HID: @_ZTSw = hidden constant
// FUND-HID: @_ZTIPw = hidden constant
-// FUND-HID: @_ZTSPKw = hidden constant
+// FUND-HID: @_ZTSPw = hidden constant
// FUND-HID: @_ZTIPKw = hidden constant
-// FUND-EXP: @_ZTSw = dllexport constant
+// FUND-HID: @_ZTSPKw = hidden constant
// FUND-EXP: @_ZTIw = dllexport constant
-// FUND-EXP: @_ZTSPw = dllexport constant
+// FUND-EXP: @_ZTSw = dllexport constant
// FUND-EXP: @_ZTIPw = dllexport constant
-// FUND-EXP: @_ZTSPKw = dllexport constant
+// FUND-EXP: @_ZTSPw = dllexport constant
// FUND-EXP: @_ZTIPKw = dllexport constant
+// FUND-EXP: @_ZTSPKw = dllexport constant
// char
-// FUND-DEF: @_ZTSc = constant
// FUND-DEF: @_ZTIc = constant
-// FUND-DEF: @_ZTSPc = constant
+// FUND-DEF: @_ZTSc = constant
// FUND-DEF: @_ZTIPc = constant
-// FUND-DEF: @_ZTSPKc = constant
+// FUND-DEF: @_ZTSPc = constant
// FUND-DEF: @_ZTIPKc = constant
-// FUND-HID: @_ZTSc = hidden constant
+// FUND-DEF: @_ZTSPKc = constant
// FUND-HID: @_ZTIc = hidden constant
-// FUND-HID: @_ZTSPc = hidden constant
+// FUND-HID: @_ZTSc = hidden constant
// FUND-HID: @_ZTIPc = hidden constant
-// FUND-HID: @_ZTSPKc = hidden constant
+// FUND-HID: @_ZTSPc = hidden constant
// FUND-HID: @_ZTIPKc = hidden constant
-// FUND-EXP: @_ZTSc = dllexport constant
+// FUND-HID: @_ZTSPKc = hidden constant
// FUND-EXP: @_ZTIc = dllexport constant
-// FUND-EXP: @_ZTSPc = dllexport constant
+// FUND-EXP: @_ZTSc = dllexport constant
// FUND-EXP: @_ZTIPc = dllexport constant
-// FUND-EXP: @_ZTSPKc = dllexport constant
+// FUND-EXP: @_ZTSPc = dllexport constant
// FUND-EXP: @_ZTIPKc = dllexport constant
+// FUND-EXP: @_ZTSPKc = dllexport constant
// unsigned char
-// FUND-DEF: @_ZTSh = constant
// FUND-DEF: @_ZTIh = constant
-// FUND-DEF: @_ZTSPh = constant
+// FUND-DEF: @_ZTSh = constant
// FUND-DEF: @_ZTIPh = constant
-// FUND-DEF: @_ZTSPKh = constant
+// FUND-DEF: @_ZTSPh = constant
// FUND-DEF: @_ZTIPKh = constant
-// FUND-HID: @_ZTSh = hidden constant
+// FUND-DEF: @_ZTSPKh = constant
// FUND-HID: @_ZTIh = hidden constant
-// FUND-HID: @_ZTSPh = hidden constant
+// FUND-HID: @_ZTSh = hidden constant
// FUND-HID: @_ZTIPh = hidden constant
-// FUND-HID: @_ZTSPKh = hidden constant
+// FUND-HID: @_ZTSPh = hidden constant
// FUND-HID: @_ZTIPKh = hidden constant
-// FUND-EXP: @_ZTSh = dllexport constant
+// FUND-HID: @_ZTSPKh = hidden constant
// FUND-EXP: @_ZTIh = dllexport constant
-// FUND-EXP: @_ZTSPh = dllexport constant
+// FUND-EXP: @_ZTSh = dllexport constant
// FUND-EXP: @_ZTIPh = dllexport constant
-// FUND-EXP: @_ZTSPKh = dllexport constant
+// FUND-EXP: @_ZTSPh = dllexport constant
// FUND-EXP: @_ZTIPKh = dllexport constant
+// FUND-EXP: @_ZTSPKh = dllexport constant
// signed char
-// FUND-DEF: @_ZTSa = constant
// FUND-DEF: @_ZTIa = constant
-// FUND-DEF: @_ZTSPa = constant
+// FUND-DEF: @_ZTSa = constant
// FUND-DEF: @_ZTIPa = constant
-// FUND-DEF: @_ZTSPKa = constant
+// FUND-DEF: @_ZTSPa = constant
// FUND-DEF: @_ZTIPKa = constant
-// FUND-HID: @_ZTSa = hidden constant
+// FUND-DEF: @_ZTSPKa = constant
// FUND-HID: @_ZTIa = hidden constant
-// FUND-HID: @_ZTSPa = hidden constant
+// FUND-HID: @_ZTSa = hidden constant
// FUND-HID: @_ZTIPa = hidden constant
-// FUND-HID: @_ZTSPKa = hidden constant
+// FUND-HID: @_ZTSPa = hidden constant
// FUND-HID: @_ZTIPKa = hidden constant
-// FUND-EXP: @_ZTSa = dllexport constant
+// FUND-HID: @_ZTSPKa = hidden constant
// FUND-EXP: @_ZTIa = dllexport constant
-// FUND-EXP: @_ZTSPa = dllexport constant
+// FUND-EXP: @_ZTSa = dllexport constant
// FUND-EXP: @_ZTIPa = dllexport constant
-// FUND-EXP: @_ZTSPKa = dllexport constant
+// FUND-EXP: @_ZTSPa = dllexport constant
// FUND-EXP: @_ZTIPKa = dllexport constant
+// FUND-EXP: @_ZTSPKa = dllexport constant
// short
-// FUND-DEF: @_ZTSs = constant
// FUND-DEF: @_ZTIs = constant
-// FUND-DEF: @_ZTSPs = constant
+// FUND-DEF: @_ZTSs = constant
// FUND-DEF: @_ZTIPs = constant
-// FUND-DEF: @_ZTSPKs = constant
+// FUND-DEF: @_ZTSPs = constant
// FUND-DEF: @_ZTIPKs = constant
-// FUND-HID: @_ZTSs = hidden constant
+// FUND-DEF: @_ZTSPKs = constant
// FUND-HID: @_ZTIs = hidden constant
-// FUND-HID: @_ZTSPs = hidden constant
+// FUND-HID: @_ZTSs = hidden constant
// FUND-HID: @_ZTIPs = hidden constant
-// FUND-HID: @_ZTSPKs = hidden constant
+// FUND-HID: @_ZTSPs = hidden constant
// FUND-HID: @_ZTIPKs = hidden constant
-// FUND-EXP: @_ZTSs = dllexport constant
+// FUND-HID: @_ZTSPKs = hidden constant
// FUND-EXP: @_ZTIs = dllexport constant
-// FUND-EXP: @_ZTSPs = dllexport constant
+// FUND-EXP: @_ZTSs = dllexport constant
// FUND-EXP: @_ZTIPs = dllexport constant
-// FUND-EXP: @_ZTSPKs = dllexport constant
+// FUND-EXP: @_ZTSPs = dllexport constant
// FUND-EXP: @_ZTIPKs = dllexport constant
+// FUND-EXP: @_ZTSPKs = dllexport constant
// unsigned short
-// FUND-DEF: @_ZTSt = constant
// FUND-DEF: @_ZTIt = constant
-// FUND-DEF: @_ZTSPt = constant
+// FUND-DEF: @_ZTSt = constant
// FUND-DEF: @_ZTIPt = constant
-// FUND-DEF: @_ZTSPKt = constant
+// FUND-DEF: @_ZTSPt = constant
// FUND-DEF: @_ZTIPKt = constant
-// FUND-HID: @_ZTSt = hidden constant
+// FUND-DEF: @_ZTSPKt = constant
// FUND-HID: @_ZTIt = hidden constant
-// FUND-HID: @_ZTSPt = hidden constant
+// FUND-HID: @_ZTSt = hidden constant
// FUND-HID: @_ZTIPt = hidden constant
-// FUND-HID: @_ZTSPKt = hidden constant
+// FUND-HID: @_ZTSPt = hidden constant
// FUND-HID: @_ZTIPKt = hidden constant
-// FUND-EXP: @_ZTSt = dllexport constant
+// FUND-HID: @_ZTSPKt = hidden constant
// FUND-EXP: @_ZTIt = dllexport constant
-// FUND-EXP: @_ZTSPt = dllexport constant
+// FUND-EXP: @_ZTSt = dllexport constant
// FUND-EXP: @_ZTIPt = dllexport constant
-// FUND-EXP: @_ZTSPKt = dllexport constant
+// FUND-EXP: @_ZTSPt = dllexport constant
// FUND-EXP: @_ZTIPKt = dllexport constant
+// FUND-EXP: @_ZTSPKt = dllexport constant
// int
-// FUND-DEF: @_ZTSi = constant
// FUND-DEF: @_ZTIi = constant
-// FUND-DEF: @_ZTSPi = constant
+// FUND-DEF: @_ZTSi = constant
// FUND-DEF: @_ZTIPi = constant
-// FUND-DEF: @_ZTSPKi = constant
+// FUND-DEF: @_ZTSPi = constant
// FUND-DEF: @_ZTIPKi = constant
-// FUND-HID: @_ZTSi = hidden constant
+// FUND-DEF: @_ZTSPKi = constant
// FUND-HID: @_ZTIi = hidden constant
-// FUND-HID: @_ZTSPi = hidden constant
+// FUND-HID: @_ZTSi = hidden constant
// FUND-HID: @_ZTIPi = hidden constant
-// FUND-HID: @_ZTSPKi = hidden constant
+// FUND-HID: @_ZTSPi = hidden constant
// FUND-HID: @_ZTIPKi = hidden constant
-// FUND-EXP: @_ZTSi = dllexport constant
+// FUND-HID: @_ZTSPKi = hidden constant
// FUND-EXP: @_ZTIi = dllexport constant
-// FUND-EXP: @_ZTSPi = dllexport constant
+// FUND-EXP: @_ZTSi = dllexport constant
// FUND-EXP: @_ZTIPi = dllexport constant
-// FUND-EXP: @_ZTSPKi = dllexport constant
+// FUND-EXP: @_ZTSPi = dllexport constant
// FUND-EXP: @_ZTIPKi = dllexport constant
+// FUND-EXP: @_ZTSPKi = dllexport constant
// unsigned int
-// FUND-DEF: @_ZTSj = constant
// FUND-DEF: @_ZTIj = constant
-// FUND-DEF: @_ZTSPj = constant
+// FUND-DEF: @_ZTSj = constant
// FUND-DEF: @_ZTIPj = constant
-// FUND-DEF: @_ZTSPKj = constant
+// FUND-DEF: @_ZTSPj = constant
// FUND-DEF: @_ZTIPKj = constant
-// FUND-HID: @_ZTSj = hidden constant
+// FUND-DEF: @_ZTSPKj = constant
// FUND-HID: @_ZTIj = hidden constant
-// FUND-HID: @_ZTSPj = hidden constant
+// FUND-HID: @_ZTSj = hidden constant
// FUND-HID: @_ZTIPj = hidden constant
-// FUND-HID: @_ZTSPKj = hidden constant
+// FUND-HID: @_ZTSPj = hidden constant
// FUND-HID: @_ZTIPKj = hidden constant
-// FUND-EXP: @_ZTSj = dllexport constant
+// FUND-HID: @_ZTSPKj = hidden constant
// FUND-EXP: @_ZTIj = dllexport constant
-// FUND-EXP: @_ZTSPj = dllexport constant
+// FUND-EXP: @_ZTSj = dllexport constant
// FUND-EXP: @_ZTIPj = dllexport constant
-// FUND-EXP: @_ZTSPKj = dllexport constant
+// FUND-EXP: @_ZTSPj = dllexport constant
// FUND-EXP: @_ZTIPKj = dllexport constant
+// FUND-EXP: @_ZTSPKj = dllexport constant
// long
-// FUND-DEF: @_ZTSl = constant
// FUND-DEF: @_ZTIl = constant
-// FUND-DEF: @_ZTSPl = constant
+// FUND-DEF: @_ZTSl = constant
// FUND-DEF: @_ZTIPl = constant
-// FUND-DEF: @_ZTSPKl = constant
+// FUND-DEF: @_ZTSPl = constant
// FUND-DEF: @_ZTIPKl = constant
-// FUND-HID: @_ZTSl = hidden constant
+// FUND-DEF: @_ZTSPKl = constant
// FUND-HID: @_ZTIl = hidden constant
-// FUND-HID: @_ZTSPl = hidden constant
+// FUND-HID: @_ZTSl = hidden constant
// FUND-HID: @_ZTIPl = hidden constant
-// FUND-HID: @_ZTSPKl = hidden constant
+// FUND-HID: @_ZTSPl = hidden constant
// FUND-HID: @_ZTIPKl = hidden constant
-// FUND-EXP: @_ZTSl = dllexport constant
+// FUND-HID: @_ZTSPKl = hidden constant
// FUND-EXP: @_ZTIl = dllexport constant
-// FUND-EXP: @_ZTSPl = dllexport constant
+// FUND-EXP: @_ZTSl = dllexport constant
// FUND-EXP: @_ZTIPl = dllexport constant
-// FUND-EXP: @_ZTSPKl = dllexport constant
+// FUND-EXP: @_ZTSPl = dllexport constant
// FUND-EXP: @_ZTIPKl = dllexport constant
+// FUND-EXP: @_ZTSPKl = dllexport constant
// unsigned long
-// FUND-DEF: @_ZTSm = constant
// FUND-DEF: @_ZTIm = constant
-// FUND-DEF: @_ZTSPm = constant
+// FUND-DEF: @_ZTSm = constant
// FUND-DEF: @_ZTIPm = constant
-// FUND-DEF: @_ZTSPKm = constant
+// FUND-DEF: @_ZTSPm = constant
// FUND-DEF: @_ZTIPKm = constant
-// FUND-HID: @_ZTSm = hidden constant
+// FUND-DEF: @_ZTSPKm = constant
// FUND-HID: @_ZTIm = hidden constant
-// FUND-HID: @_ZTSPm = hidden constant
+// FUND-HID: @_ZTSm = hidden constant
// FUND-HID: @_ZTIPm = hidden constant
-// FUND-HID: @_ZTSPKm = hidden constant
+// FUND-HID: @_ZTSPm = hidden constant
// FUND-HID: @_ZTIPKm = hidden constant
-// FUND-EXP: @_ZTSm = dllexport constant
+// FUND-HID: @_ZTSPKm = hidden constant
// FUND-EXP: @_ZTIm = dllexport constant
-// FUND-EXP: @_ZTSPm = dllexport constant
+// FUND-EXP: @_ZTSm = dllexport constant
// FUND-EXP: @_ZTIPm = dllexport constant
-// FUND-EXP: @_ZTSPKm = dllexport constant
+// FUND-EXP: @_ZTSPm = dllexport constant
// FUND-EXP: @_ZTIPKm = dllexport constant
+// FUND-EXP: @_ZTSPKm = dllexport constant
// long long
-// FUND-DEF: @_ZTSx = constant
// FUND-DEF: @_ZTIx = constant
-// FUND-DEF: @_ZTSPx = constant
+// FUND-DEF: @_ZTSx = constant
// FUND-DEF: @_ZTIPx = constant
-// FUND-DEF: @_ZTSPKx = constant
+// FUND-DEF: @_ZTSPx = constant
// FUND-DEF: @_ZTIPKx = constant
-// FUND-HID: @_ZTSx = hidden constant
+// FUND-DEF: @_ZTSPKx = constant
// FUND-HID: @_ZTIx = hidden constant
-// FUND-HID: @_ZTSPx = hidden constant
+// FUND-HID: @_ZTSx = hidden constant
// FUND-HID: @_ZTIPx = hidden constant
-// FUND-HID: @_ZTSPKx = hidden constant
+// FUND-HID: @_ZTSPx = hidden constant
// FUND-HID: @_ZTIPKx = hidden constant
-// FUND-EXP: @_ZTSx = dllexport constant
+// FUND-HID: @_ZTSPKx = hidden constant
// FUND-EXP: @_ZTIx = dllexport constant
-// FUND-EXP: @_ZTSPx = dllexport constant
+// FUND-EXP: @_ZTSx = dllexport constant
// FUND-EXP: @_ZTIPx = dllexport constant
-// FUND-EXP: @_ZTSPKx = dllexport constant
+// FUND-EXP: @_ZTSPx = dllexport constant
// FUND-EXP: @_ZTIPKx = dllexport constant
+// FUND-EXP: @_ZTSPKx = dllexport constant
// unsigned long long
-// FUND-DEF: @_ZTSy = constant
// FUND-DEF: @_ZTIy = constant
-// FUND-DEF: @_ZTSPy = constant
+// FUND-DEF: @_ZTSy = constant
// FUND-DEF: @_ZTIPy = constant
-// FUND-DEF: @_ZTSPKy = constant
+// FUND-DEF: @_ZTSPy = constant
// FUND-DEF: @_ZTIPKy = constant
-// FUND-HID: @_ZTSy = hidden constant
+// FUND-DEF: @_ZTSPKy = constant
// FUND-HID: @_ZTIy = hidden constant
-// FUND-HID: @_ZTSPy = hidden constant
+// FUND-HID: @_ZTSy = hidden constant
// FUND-HID: @_ZTIPy = hidden constant
-// FUND-HID: @_ZTSPKy = hidden constant
+// FUND-HID: @_ZTSPy = hidden constant
// FUND-HID: @_ZTIPKy = hidden constant
-// FUND-EXP: @_ZTSy = dllexport constant
+// FUND-HID: @_ZTSPKy = hidden constant
// FUND-EXP: @_ZTIy = dllexport constant
-// FUND-EXP: @_ZTSPy = dllexport constant
+// FUND-EXP: @_ZTSy = dllexport constant
// FUND-EXP: @_ZTIPy = dllexport constant
-// FUND-EXP: @_ZTSPKy = dllexport constant
+// FUND-EXP: @_ZTSPy = dllexport constant
// FUND-EXP: @_ZTIPKy = dllexport constant
+// FUND-EXP: @_ZTSPKy = dllexport constant
// __int128
-// FUND-DEF: @_ZTSn = constant
// FUND-DEF: @_ZTIn = constant
-// FUND-DEF: @_ZTSPn = constant
+// FUND-DEF: @_ZTSn = constant
// FUND-DEF: @_ZTIPn = constant
-// FUND-DEF: @_ZTSPKn = constant
+// FUND-DEF: @_ZTSPn = constant
// FUND-DEF: @_ZTIPKn = constant
-// FUND-HID: @_ZTSn = hidden constant
+// FUND-DEF: @_ZTSPKn = constant
// FUND-HID: @_ZTIn = hidden constant
-// FUND-HID: @_ZTSPn = hidden constant
+// FUND-HID: @_ZTSn = hidden constant
// FUND-HID: @_ZTIPn = hidden constant
-// FUND-HID: @_ZTSPKn = hidden constant
+// FUND-HID: @_ZTSPn = hidden constant
// FUND-HID: @_ZTIPKn = hidden constant
-// FUND-EXP: @_ZTSn = dllexport constant
+// FUND-HID: @_ZTSPKn = hidden constant
// FUND-EXP: @_ZTIn = dllexport constant
-// FUND-EXP: @_ZTSPn = dllexport constant
+// FUND-EXP: @_ZTSn = dllexport constant
// FUND-EXP: @_ZTIPn = dllexport constant
-// FUND-EXP: @_ZTSPKn = dllexport constant
+// FUND-EXP: @_ZTSPn = dllexport constant
// FUND-EXP: @_ZTIPKn = dllexport constant
+// FUND-EXP: @_ZTSPKn = dllexport constant
// unsigned __int128
-// FUND-DEF: @_ZTSo = constant
// FUND-DEF: @_ZTIo = constant
-// FUND-DEF: @_ZTSPo = constant
+// FUND-DEF: @_ZTSo = constant
// FUND-DEF: @_ZTIPo = constant
-// FUND-DEF: @_ZTSPKo = constant
+// FUND-DEF: @_ZTSPo = constant
// FUND-DEF: @_ZTIPKo = constant
-// FUND-HID: @_ZTSo = hidden constant
+// FUND-DEF: @_ZTSPKo = constant
// FUND-HID: @_ZTIo = hidden constant
-// FUND-HID: @_ZTSPo = hidden constant
+// FUND-HID: @_ZTSo = hidden constant
// FUND-HID: @_ZTIPo = hidden constant
-// FUND-HID: @_ZTSPKo = hidden constant
+// FUND-HID: @_ZTSPo = hidden constant
// FUND-HID: @_ZTIPKo = hidden constant
-// FUND-EXP: @_ZTSo = dllexport constant
+// FUND-HID: @_ZTSPKo = hidden constant
// FUND-EXP: @_ZTIo = dllexport constant
-// FUND-EXP: @_ZTSPo = dllexport constant
+// FUND-EXP: @_ZTSo = dllexport constant
// FUND-EXP: @_ZTIPo = dllexport constant
-// FUND-EXP: @_ZTSPKo = dllexport constant
+// FUND-EXP: @_ZTSPo = dllexport constant
// FUND-EXP: @_ZTIPKo = dllexport constant
+// FUND-EXP: @_ZTSPKo = dllexport constant
// half
-// FUND-DEF: @_ZTSDh = constant
// FUND-DEF: @_ZTIDh = constant
-// FUND-DEF: @_ZTSPDh = constant
+// FUND-DEF: @_ZTSDh = constant
// FUND-DEF: @_ZTIPDh = constant
-// FUND-DEF: @_ZTSPKDh = constant
+// FUND-DEF: @_ZTSPDh = constant
// FUND-DEF: @_ZTIPKDh = constant
-// FUND-HID: @_ZTSDh = hidden constant
+// FUND-DEF: @_ZTSPKDh = constant
// FUND-HID: @_ZTIDh = hidden constant
-// FUND-HID: @_ZTSPDh = hidden constant
+// FUND-HID: @_ZTSDh = hidden constant
// FUND-HID: @_ZTIPDh = hidden constant
-// FUND-HID: @_ZTSPKDh = hidden constant
+// FUND-HID: @_ZTSPDh = hidden constant
// FUND-HID: @_ZTIPKDh = hidden constant
-// FUND-EXP: @_ZTSDh = dllexport constant
+// FUND-HID: @_ZTSPKDh = hidden constant
// FUND-EXP: @_ZTIDh = dllexport constant
-// FUND-EXP: @_ZTSPDh = dllexport constant
+// FUND-EXP: @_ZTSDh = dllexport constant
// FUND-EXP: @_ZTIPDh = dllexport constant
-// FUND-EXP: @_ZTSPKDh = dllexport constant
+// FUND-EXP: @_ZTSPDh = dllexport constant
// FUND-EXP: @_ZTIPKDh = dllexport constant
+// FUND-EXP: @_ZTSPKDh = dllexport constant
// float
-// FUND-DEF: @_ZTSf = constant
// FUND-DEF: @_ZTIf = constant
-// FUND-DEF: @_ZTSPf = constant
+// FUND-DEF: @_ZTSf = constant
// FUND-DEF: @_ZTIPf = constant
-// FUND-DEF: @_ZTSPKf = constant
+// FUND-DEF: @_ZTSPf = constant
// FUND-DEF: @_ZTIPKf = constant
-// FUND-HID: @_ZTSf = hidden constant
+// FUND-DEF: @_ZTSPKf = constant
// FUND-HID: @_ZTIf = hidden constant
-// FUND-HID: @_ZTSPf = hidden constant
+// FUND-HID: @_ZTSf = hidden constant
// FUND-HID: @_ZTIPf = hidden constant
-// FUND-HID: @_ZTSPKf = hidden constant
+// FUND-HID: @_ZTSPf = hidden constant
// FUND-HID: @_ZTIPKf = hidden constant
-// FUND-EXP: @_ZTSf = dllexport constant
+// FUND-HID: @_ZTSPKf = hidden constant
// FUND-EXP: @_ZTIf = dllexport constant
-// FUND-EXP: @_ZTSPf = dllexport constant
+// FUND-EXP: @_ZTSf = dllexport constant
// FUND-EXP: @_ZTIPf = dllexport constant
-// FUND-EXP: @_ZTSPKf = dllexport constant
+// FUND-EXP: @_ZTSPf = dllexport constant
// FUND-EXP: @_ZTIPKf = dllexport constant
+// FUND-EXP: @_ZTSPKf = dllexport constant
// double
-// FUND-DEF: @_ZTSd = constant
// FUND-DEF: @_ZTId = constant
-// FUND-DEF: @_ZTSPd = constant
+// FUND-DEF: @_ZTSd = constant
// FUND-DEF: @_ZTIPd = constant
-// FUND-DEF: @_ZTSPKd = constant
+// FUND-DEF: @_ZTSPd = constant
// FUND-DEF: @_ZTIPKd = constant
-// FUND-HID: @_ZTSd = hidden constant
+// FUND-DEF: @_ZTSPKd = constant
// FUND-HID: @_ZTId = hidden constant
-// FUND-HID: @_ZTSPd = hidden constant
+// FUND-HID: @_ZTSd = hidden constant
// FUND-HID: @_ZTIPd = hidden constant
-// FUND-HID: @_ZTSPKd = hidden constant
+// FUND-HID: @_ZTSPd = hidden constant
// FUND-HID: @_ZTIPKd = hidden constant
-// FUND-EXP: @_ZTSd = dllexport constant
+// FUND-HID: @_ZTSPKd = hidden constant
// FUND-EXP: @_ZTId = dllexport constant
-// FUND-EXP: @_ZTSPd = dllexport constant
+// FUND-EXP: @_ZTSd = dllexport constant
// FUND-EXP: @_ZTIPd = dllexport constant
-// FUND-EXP: @_ZTSPKd = dllexport constant
+// FUND-EXP: @_ZTSPd = dllexport constant
// FUND-EXP: @_ZTIPKd = dllexport constant
+// FUND-EXP: @_ZTSPKd = dllexport constant
// long double
-// FUND-DEF: @_ZTSe = constant
// FUND-DEF: @_ZTIe = constant
-// FUND-DEF: @_ZTSPe = constant
+// FUND-DEF: @_ZTSe = constant
// FUND-DEF: @_ZTIPe = constant
-// FUND-DEF: @_ZTSPKe = constant
+// FUND-DEF: @_ZTSPe = constant
// FUND-DEF: @_ZTIPKe = constant
-// FUND-HID: @_ZTSe = hidden constant
+// FUND-DEF: @_ZTSPKe = constant
// FUND-HID: @_ZTIe = hidden constant
-// FUND-HID: @_ZTSPe = hidden constant
+// FUND-HID: @_ZTSe = hidden constant
// FUND-HID: @_ZTIPe = hidden constant
-// FUND-HID: @_ZTSPKe = hidden constant
+// FUND-HID: @_ZTSPe = hidden constant
// FUND-HID: @_ZTIPKe = hidden constant
-// FUND-EXP: @_ZTSe = dllexport constant
+// FUND-HID: @_ZTSPKe = hidden constant
// FUND-EXP: @_ZTIe = dllexport constant
-// FUND-EXP: @_ZTSPe = dllexport constant
+// FUND-EXP: @_ZTSe = dllexport constant
// FUND-EXP: @_ZTIPe = dllexport constant
-// FUND-EXP: @_ZTSPKe = dllexport constant
+// FUND-EXP: @_ZTSPe = dllexport constant
// FUND-EXP: @_ZTIPKe = dllexport constant
+// FUND-EXP: @_ZTSPKe = dllexport constant
// __ieee128
-// FUND-DEF: @_ZTSu9__ieee128 = constant
// FUND-DEF: @_ZTIu9__ieee128 = constant
-// FUND-DEF: @_ZTSPu9__ieee128 = constant
+// FUND-DEF: @_ZTSu9__ieee128 = constant
// FUND-DEF: @_ZTIPu9__ieee128 = constant
-// FUND-DEF: @_ZTSPKu9__ieee128 = constant
+// FUND-DEF: @_ZTSPu9__ieee128 = constant
// FUND-DEF: @_ZTIPKu9__ieee128 = constant
-// FUND-HID: @_ZTSu9__ieee128 = hidden constant
+// FUND-DEF: @_ZTSPKu9__ieee128 = constant
// FUND-HID: @_ZTIu9__ieee128 = hidden constant
-// FUND-HID: @_ZTSPu9__ieee128 = hidden constant
+// FUND-HID: @_ZTSu9__ieee128 = hidden constant
// FUND-HID: @_ZTIPu9__ieee128 = hidden constant
-// FUND-HID: @_ZTSPKu9__ieee128 = hidden constant
+// FUND-HID: @_ZTSPu9__ieee128 = hidden constant
// FUND-HID: @_ZTIPKu9__ieee128 = hidden constant
-// FUND-EXP: @_ZTSu9__ieee128 = dllexport constant
+// FUND-HID: @_ZTSPKu9__ieee128 = hidden constant
// FUND-EXP: @_ZTIu9__ieee128 = dllexport constant
-// FUND-EXP: @_ZTSPu9__ieee128 = dllexport constant
+// FUND-EXP: @_ZTSu9__ieee128 = dllexport constant
// FUND-EXP: @_ZTIPu9__ieee128 = dllexport constant
-// FUND-EXP: @_ZTSPKu9__ieee128 = dllexport constant
+// FUND-EXP: @_ZTSPu9__ieee128 = dllexport constant
// FUND-EXP: @_ZTIPKu9__ieee128 = dllexport constant
+// FUND-EXP: @_ZTSPKu9__ieee128 = dllexport constant
// char8_t
-// FUND-DEF: @_ZTSDu = constant
// FUND-DEF: @_ZTIDu = constant
-// FUND-DEF: @_ZTSPDu = constant
+// FUND-DEF: @_ZTSDu = constant
// FUND-DEF: @_ZTIPDu = constant
-// FUND-DEF: @_ZTSPKDu = constant
+// FUND-DEF: @_ZTSPDu = constant
// FUND-DEF: @_ZTIPKDu = constant
-// FUND-HID: @_ZTSDu = hidden constant
+// FUND-DEF: @_ZTSPKDu = constant
// FUND-HID: @_ZTIDu = hidden constant
-// FUND-HID: @_ZTSPDu = hidden constant
+// FUND-HID: @_ZTSDu = hidden constant
// FUND-HID: @_ZTIPDu = hidden constant
-// FUND-HID: @_ZTSPKDu = hidden constant
+// FUND-HID: @_ZTSPDu = hidden constant
// FUND-HID: @_ZTIPKDu = hidden constant
-// FUND-EXP: @_ZTSDu = dllexport constant
+// FUND-HID: @_ZTSPKDu = hidden constant
// FUND-EXP: @_ZTIDu = dllexport constant
-// FUND-EXP: @_ZTSPDu = dllexport constant
+// FUND-EXP: @_ZTSDu = dllexport constant
// FUND-EXP: @_ZTIPDu = dllexport constant
-// FUND-EXP: @_ZTSPKDu = dllexport constant
+// FUND-EXP: @_ZTSPDu = dllexport constant
// FUND-EXP: @_ZTIPKDu = dllexport constant
+// FUND-EXP: @_ZTSPKDu = dllexport constant
// char16_t
-// FUND-DEF: @_ZTSDs = constant
// FUND-DEF: @_ZTIDs = constant
-// FUND-DEF: @_ZTSPDs = constant
+// FUND-DEF: @_ZTSDs = constant
// FUND-DEF: @_ZTIPDs = constant
-// FUND-DEF: @_ZTSPKDs = constant
+// FUND-DEF: @_ZTSPDs = constant
// FUND-DEF: @_ZTIPKDs = constant
-// FUND-HID: @_ZTSDs = hidden constant
+// FUND-DEF: @_ZTSPKDs = constant
// FUND-HID: @_ZTIDs = hidden constant
-// FUND-HID: @_ZTSPDs = hidden constant
+// FUND-HID: @_ZTSDs = hidden constant
// FUND-HID: @_ZTIPDs = hidden constant
-// FUND-HID: @_ZTSPKDs = hidden constant
+// FUND-HID: @_ZTSPDs = hidden constant
// FUND-HID: @_ZTIPKDs = hidden constant
-// FUND-EXP: @_ZTSDs = dllexport constant
+// FUND-HID: @_ZTSPKDs = hidden constant
// FUND-EXP: @_ZTIDs = dllexport constant
-// FUND-EXP: @_ZTSPDs = dllexport constant
+// FUND-EXP: @_ZTSDs = dllexport constant
// FUND-EXP: @_ZTIPDs = dllexport constant
-// FUND-EXP: @_ZTSPKDs = dllexport constant
+// FUND-EXP: @_ZTSPDs = dllexport constant
// FUND-EXP: @_ZTIPKDs = dllexport constant
+// FUND-EXP: @_ZTSPKDs = dllexport constant
// char32_t
-// FUND-DEF: @_ZTSDi = constant
// FUND-DEF: @_ZTIDi = constant
-// FUND-DEF: @_ZTSPDi = constant
+// FUND-DEF: @_ZTSDi = constant
// FUND-DEF: @_ZTIPDi = constant
-// FUND-DEF: @_ZTSPKDi = constant
+// FUND-DEF: @_ZTSPDi = constant
// FUND-DEF: @_ZTIPKDi = constant
-// FUND-HID: @_ZTSDi = hidden constant
+// FUND-DEF: @_ZTSPKDi = constant
// FUND-HID: @_ZTIDi = hidden constant
-// FUND-HID: @_ZTSPDi = hidden constant
+// FUND-HID: @_ZTSDi = hidden constant
// FUND-HID: @_ZTIPDi = hidden constant
-// FUND-HID: @_ZTSPKDi = hidden constant
+// FUND-HID: @_ZTSPDi = hidden constant
// FUND-HID: @_ZTIPKDi = hidden constant
-// FUND-EXP: @_ZTSDi = dllexport constant
+// FUND-HID: @_ZTSPKDi = hidden constant
// FUND-EXP: @_ZTIDi = dllexport constant
-// FUND-EXP: @_ZTSPDi = dllexport constant
+// FUND-EXP: @_ZTSDi = dllexport constant
// FUND-EXP: @_ZTIPDi = dllexport constant
-// FUND-EXP: @_ZTSPKDi = dllexport constant
+// FUND-EXP: @_ZTSPDi = dllexport constant
// FUND-EXP: @_ZTIPKDi = dllexport constant
+// FUND-EXP: @_ZTSPKDi = dllexport constant
diff --git a/clang/test/CodeGenCXX/modules-vtable.cppm b/clang/test/CodeGenCXX/modules-vtable.cppm
index 5cc3504..6589b9f 100644
--- a/clang/test/CodeGenCXX/modules-vtable.cppm
+++ b/clang/test/CodeGenCXX/modules-vtable.cppm
@@ -40,13 +40,13 @@ inline
Base::~Base() {}
// CHECK: @_ZTVW3Mod4Base = unnamed_addr constant
-// CHECK: @_ZTSW3Mod4Base = constant
// CHECK: @_ZTIW3Mod4Base = constant
+// CHECK: @_ZTSW3Mod4Base = constant
// With the new Itanium C++ ABI, the linkage of vtables in modules don't need to be linkonce ODR.
// CHECK-INLINE: @_ZTVW3Mod4Base = {{.*}}unnamed_addr constant
-// CHECK-INLINE: @_ZTSW3Mod4Base = {{.*}}constant
// CHECK-INLINE: @_ZTIW3Mod4Base = {{.*}}constant
+// CHECK-INLINE: @_ZTSW3Mod4Base = {{.*}}constant
module :private;
int private_use() {
@@ -61,12 +61,12 @@ int use() {
return 43;
}
-// CHECK-NOT: @_ZTSW3Mod4Base
// CHECK-NOT: @_ZTIW3Mod4Base
+// CHECK-NOT: @_ZTSW3Mod4Base
// CHECK: @_ZTVW3Mod4Base = external
-// CHECK-INLINE-NOT: @_ZTSW3Mod4Base
// CHECK-INLINE-NOT: @_ZTIW3Mod4Base
+// CHECK-INLINE-NOT: @_ZTSW3Mod4Base
// CHECK-INLINE: @_ZTVW3Mod4Base = external
// Check the case that the declaration of the key function comes from another
@@ -86,8 +86,8 @@ int a_use() {
}
// CHECK: @_ZTVW1M1C = unnamed_addr constant
-// CHECK: @_ZTSW1M1C = constant
// CHECK: @_ZTIW1M1C = constant
+// CHECK: @_ZTSW1M1C = constant
//--- M-B.cppm
export module M:B;
@@ -101,5 +101,5 @@ int b_use() {
}
// CHECK: @_ZTVW1M1C = external
-// CHECK-NOT: @_ZTSW1M1C
// CHECK-NOT: @_ZTIW1M1C
+// CHECK-NOT: @_ZTSW1M1C
diff --git a/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp b/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp
index 2b633add..b50e090 100644
--- a/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp
+++ b/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp
@@ -5,12 +5,12 @@
struct A { int a; };
+// DARWIN: @_ZTI1A = linkonce_odr hidden constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr inttoptr (i64 add (i64 ptrtoint (ptr @_ZTS1A to i64), i64 -9223372036854775808) to ptr) }
// DARWIN: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// DARWIN: @_ZTS1A = linkonce_odr hidden constant [3 x i8] c"1A\00"
-// DARWIN: @_ZTI1A = linkonce_odr hidden constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr inttoptr (i64 add (i64 ptrtoint (ptr @_ZTS1A to i64), i64 -9223372036854775808) to ptr) }
+// ELF: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS1A }
// ELF: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// ELF: @_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00"
-// ELF: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS1A }
auto ATI = typeid(A);
diff --git a/clang/test/CodeGenCXX/ptrauth-type-info-vtable.cpp b/clang/test/CodeGenCXX/ptrauth-type-info-vtable.cpp
index 174aeda..f4396e4 100644
--- a/clang/test/CodeGenCXX/ptrauth-type-info-vtable.cpp
+++ b/clang/test/CodeGenCXX/ptrauth-type-info-vtable.cpp
@@ -60,12 +60,13 @@ static_assert(__has_feature(ptrauth_type_info_vtable_pointer_discrimination) ==
extern "C" int disc_std_type_info = __builtin_ptrauth_string_discriminator("_ZTVSt9type_info");
// CHECK: @_ZTV10TestStruct = unnamed_addr constant { [4 x ptr] } { [4 x ptr] [ptr null, ptr @_ZTI10TestStruct, ptr ptrauth (ptr @_ZN10TestStructD1Ev, i32 0, i64 52216, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV10TestStruct, i32 0, i32 0, i32 2)), ptr ptrauth (ptr @_ZN10TestStructD0Ev, i32 0, i64 39671, ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV10TestStruct, i32 0, i32 0, i32 3))] }, align 8
-// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
-// CHECK: @_ZTS10TestStruct = constant [13 x i8] c"10TestStruct\00", align 1
// NODISC: @_ZTI10TestStruct = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS10TestStruct }, align 8
-// DISC: @_ZTI10TestStruct = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2, i64 [[STDTYPEINFO_DISC]]), ptr @_ZTS10TestStruct }, align 8
+// DISC: @_ZTI10TestStruct = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2, i64 [[STDTYPEINFO_DISC]], ptr @_ZTI10TestStruct), ptr @_ZTS10TestStruct }, align 8
+
+// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
+// CHECK: @_ZTS10TestStruct = constant [13 x i8] c"10TestStruct\00", align 1
struct TestStruct {
virtual ~TestStruct();
diff --git a/clang/test/CodeGenCXX/ptrauth-vtable-virtual-inheritance-thunk.cpp b/clang/test/CodeGenCXX/ptrauth-vtable-virtual-inheritance-thunk.cpp
index 031bb48..b5c15a2 100644
--- a/clang/test/CodeGenCXX/ptrauth-vtable-virtual-inheritance-thunk.cpp
+++ b/clang/test/CodeGenCXX/ptrauth-vtable-virtual-inheritance-thunk.cpp
@@ -94,30 +94,30 @@
// CHECK-SAME: ptr ptrauth (ptr @_ZN1AD1Ev, i32 0, i64 2043, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 5)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1AD0Ev, i32 0, i64 63674, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 6))] }, align 8
+// CHECK: @_ZTI1A = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS1A }, align 8
+
// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1A = constant [3 x i8] c"1A\00", align 1
-// CHECK: @_ZTI1A = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS1A }, align 8
+// CHECK: @_ZTI1C = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1C, i32 0, i32 1, ptr @_ZTI1B, i64 -6141 }, align 8
// CHECK: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global [0 x ptr]
// CHECK: @_ZTS1C = constant [3 x i8] c"1C\00", align 1
+// DARWIN: @_ZTI1B = linkonce_odr hidden constant { ptr, ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), i32 2), ptr inttoptr (i64 add (i64 ptrtoint (ptr @_ZTS1B to i64), i64 -9223372036854775808) to ptr), ptr @_ZTI1A }, align 8
+// ELF: @_ZTI1B = linkonce_odr constant { ptr, ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), i32 2), ptr @_ZTS1B, ptr @_ZTI1A }, comdat, align 8
+
// CHECK: @_ZTVN10__cxxabiv120__si_class_type_infoE = external global [0 x ptr]
// DARWIN: @_ZTS1B = linkonce_odr hidden constant [3 x i8] c"1B\00", align 1
// ELF: @_ZTS1B = linkonce_odr constant [3 x i8] c"1B\00", comdat, align 1
-// DARWIN: @_ZTI1B = linkonce_odr hidden constant { ptr, ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), i32 2), ptr inttoptr (i64 add (i64 ptrtoint (ptr @_ZTS1B to i64), i64 -9223372036854775808) to ptr), ptr @_ZTI1A }, align 8
-// ELF: @_ZTI1B = linkonce_odr constant { ptr, ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), i32 2), ptr @_ZTS1B, ptr @_ZTI1A }, comdat, align 8
-
-// CHECK: @_ZTI1C = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1C, i32 0, i32 1, ptr @_ZTI1B, i64 -6141 }, align 8
+// CHECK: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1D, i32 0, i32 1, ptr @_ZTI1B, i64 -6141 }, align 8
// CHECK: @_ZTS1D = constant [3 x i8] c"1D\00", align 1
-// CHECK: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1D, i32 0, i32 1, ptr @_ZTI1B, i64 -6141 }, align 8
-
// CHECK: @_ZTV1E = unnamed_addr constant { [7 x ptr] } { [7 x ptr] [ptr null, ptr @_ZTI1E,
// CHECK-SAME: ptr ptrauth (ptr @_ZN1E1fEv, i32 0, i64 28408, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1E, i32 0, i32 0, i32 2)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1E1gEv, i32 0, i64 22926, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1E, i32 0, i32 0, i32 3)),
@@ -125,10 +125,10 @@
// CHECK-SAME: ptr ptrauth (ptr @_ZN1ED1Ev, i32 0, i64 5817, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1E, i32 0, i32 0, i32 5)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1ED0Ev, i32 0, i64 26464, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1E, i32 0, i32 0, i32 6))] }, align 8
-// CHECK: @_ZTS1E = constant [3 x i8] c"1E\00", align 1
-
// CHECK: @_ZTI1E = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2), ptr @_ZTS1E }, align 8
+// CHECK: @_ZTS1E = constant [3 x i8] c"1E\00", align 1
+
// CHECK: @_ZTC1F0_1C = unnamed_addr constant { [5 x ptr], [11 x ptr] } { [5 x ptr] [ptr inttoptr (i64 16 to ptr), ptr null, ptr @_ZTI1C,
// CHECK-SAME: ptr ptrauth (ptr @_ZN1CD1Ev, i32 0, i64 31214, ptr getelementptr inbounds ({ [5 x ptr], [11 x ptr] }, ptr @_ZTC1F0_1C, i32 0, i32 0, i32 3)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1CD0Ev, i32 0, i64 8507, ptr getelementptr inbounds ({ [5 x ptr], [11 x ptr] }, ptr @_ZTC1F0_1C, i32 0, i32 0, i32 4))], [11 x ptr] [ptr inttoptr (i64 -16 to ptr), ptr null, ptr null, ptr null, ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1C,
@@ -149,10 +149,10 @@
// CHECK-SAME: ptr ptrauth (ptr @_ZTv0_n48_N1DD1Ev, i32 0, i64 2043, ptr getelementptr inbounds ({ [7 x ptr], [11 x ptr] }, ptr @_ZTC1F8_1D, i32 0, i32 1, i32 9)),
// CHECK-SAME: ptr ptrauth (ptr @_ZTv0_n48_N1DD0Ev, i32 0, i64 63674, ptr getelementptr inbounds ({ [7 x ptr], [11 x ptr] }, ptr @_ZTC1F8_1D, i32 0, i32 1, i32 10))] }, align 8
-// CHECK: @_ZTS1F = constant [3 x i8] c"1F\00", align 1
-
// CHECK: @_ZTI1F = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1F, i32 3, i32 3, ptr @_ZTI1C, i64 2, ptr @_ZTI1D, i64 2050, ptr @_ZTI1E, i64 -8189 }, align 8
+// CHECK: @_ZTS1F = constant [3 x i8] c"1F\00", align 1
+
// CHECK: @_ZTC1G0_1C = unnamed_addr constant { [5 x ptr], [11 x ptr] } { [5 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C,
// CHECK-SAME: ptr ptrauth (ptr @_ZN1CD1Ev, i32 0, i64 31214, ptr getelementptr inbounds ({ [5 x ptr], [11 x ptr] }, ptr @_ZTC1G0_1C, i32 0, i32 0, i32 3)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1CD0Ev, i32 0, i64 8507, ptr getelementptr inbounds ({ [5 x ptr], [11 x ptr] }, ptr @_ZTC1G0_1C, i32 0, i32 0, i32 4))], [11 x ptr] [ptr inttoptr (i64 -24 to ptr), ptr null, ptr null, ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C,
@@ -173,10 +173,10 @@
// CHECK-SAME: ptr ptrauth (ptr @_ZTv0_n48_N1DD1Ev, i32 0, i64 2043, ptr getelementptr inbounds ({ [7 x ptr], [11 x ptr] }, ptr @_ZTC1G8_1D, i32 0, i32 1, i32 9)),
// CHECK-SAME: ptr ptrauth (ptr @_ZTv0_n48_N1DD0Ev, i32 0, i64 63674, ptr getelementptr inbounds ({ [7 x ptr], [11 x ptr] }, ptr @_ZTC1G8_1D, i32 0, i32 1, i32 10))] }, align 8
-// CHECK: @_ZTS1G = constant [3 x i8] c"1G\00", align 1
-
// CHECK: @_ZTI1G = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64, ptr, i64 } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), i32 2), ptr @_ZTS1G, i32 3, i32 3, ptr @_ZTI1E, i64 -8189, ptr @_ZTI1C, i64 2, ptr @_ZTI1D, i64 2050 }, align 8
+// CHECK: @_ZTS1G = constant [3 x i8] c"1G\00", align 1
+
// CHECK: @_ZTV1B = linkonce_odr unnamed_addr constant { [7 x ptr] } { [7 x ptr] [ptr null, ptr @_ZTI1B,
// CHECK-SAME: ptr ptrauth (ptr @_ZN1A1fEv, i32 0, i64 55636, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 2)),
// CHECK-SAME: ptr ptrauth (ptr @_ZN1A1gEv, i32 0, i64 19402, ptr getelementptr inbounds ({ [7 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)),
diff --git a/clang/test/CodeGenCXX/rtti-linkage.cpp b/clang/test/CodeGenCXX/rtti-linkage.cpp
index ca50a1b..03e7cde 100644
--- a/clang/test/CodeGenCXX/rtti-linkage.cpp
+++ b/clang/test/CodeGenCXX/rtti-linkage.cpp
@@ -3,73 +3,73 @@
#include <typeinfo>
+// CHECK-BOTH: _ZTIP1C = internal constant
// CHECK-BOTH: _ZTSP1C = internal constant
-// CHECK-BOTH: _ZTS1C = internal constant
// CHECK-BOTH: _ZTI1C = internal constant
-// CHECK-BOTH: _ZTIP1C = internal constant
-// CHECK-BOTH: _ZTSPP1C = internal constant
+// CHECK-BOTH: _ZTS1C = internal constant
// CHECK-BOTH: _ZTIPP1C = internal constant
-// CHECK-BOTH: _ZTSM1Ci = internal constant
+// CHECK-BOTH: _ZTSPP1C = internal constant
// CHECK-BOTH: _ZTIM1Ci = internal constant
-// CHECK-BOTH: _ZTSPM1Ci = internal constant
+// CHECK-BOTH: _ZTSM1Ci = internal constant
// CHECK-BOTH: _ZTIPM1Ci = internal constant
-// CHECK-BOTH: _ZTSM1CS_ = internal constant
+// CHECK-BOTH: _ZTSPM1Ci = internal constant
// CHECK-BOTH: _ZTIM1CS_ = internal constant
-// CHECK-BOTH: _ZTSM1CPS_ = internal constant
+// CHECK-BOTH: _ZTSM1CS_ = internal constant
// CHECK-BOTH: _ZTIM1CPS_ = internal constant
+// CHECK-BOTH: _ZTSM1CPS_ = internal constant
+// CHECK-BOTH: _ZTIM1A1C = internal constant
// CHECK-BOTH: _ZTSM1A1C = internal constant
-// CHECK: _ZTS1A = linkonce_odr constant
-// CHECK-WITH-HIDDEN: _ZTS1A = linkonce_odr hidden constant
// CHECK: _ZTI1A = linkonce_odr constant
// CHECK-WITH-HIDDEN: _ZTI1A = linkonce_odr hidden constant
-// CHECK-BOTH: _ZTIM1A1C = internal constant
-// CHECK-BOTH: _ZTSM1AP1C = internal constant
+// CHECK: _ZTS1A = linkonce_odr constant
+// CHECK-WITH-HIDDEN: _ZTS1A = linkonce_odr hidden constant
// CHECK-BOTH: _ZTIM1AP1C = internal constant
+// CHECK-BOTH: _ZTSM1AP1C = internal constant
// CHECK-WITH-HIDDEN: _ZTSFN12_GLOBAL__N_11DEvE = internal constant
-// CHECK-WITH-HIDDEN: @_ZTSPK2T4 = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTS2T4 = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTI2T4 = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTIPK2T4 = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTSZ2t5vE1A = internal constant
+// CHECK-WITH-HIDDEN: @_ZTIPK2T4 = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTSPK2T4 = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTI2T4 = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTS2T4 = linkonce_odr hidden constant
// CHECK-WITH-HIDDEN: @_ZTIZ2t5vE1A = internal constant
-// CHECK-WITH-HIDDEN: @_ZTSZ2t6vE1A = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTSZ2t5vE1A = internal constant
// CHECK-WITH-HIDDEN: @_ZTIZ2t6vE1A = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTSZ2t6vE1A = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTIPZ2t7vE1A = linkonce_odr hidden constant
// CHECK-WITH-HIDDEN: @_ZTSPZ2t7vE1A = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTSZ2t7vE1A = linkonce_odr hidden constant
// CHECK-WITH-HIDDEN: @_ZTIZ2t7vE1A = linkonce_odr hidden constant
-// CHECK-WITH-HIDDEN: @_ZTIPZ2t7vE1A = linkonce_odr hidden constant
+// CHECK-WITH-HIDDEN: @_ZTSZ2t7vE1A = linkonce_odr hidden constant
-// CHECK: _ZTSN12_GLOBAL__N_11DE = internal constant
// CHECK: _ZTIN12_GLOBAL__N_11DE = internal constant
-// CHECK: _ZTSPN12_GLOBAL__N_11DE = internal constant
+// CHECK: _ZTSN12_GLOBAL__N_11DE = internal constant
// CHECK: _ZTIPN12_GLOBAL__N_11DE = internal constant
-// CHECK: _ZTSFN12_GLOBAL__N_11DEvE = internal constant
+// CHECK: _ZTSPN12_GLOBAL__N_11DE = internal constant
// CHECK: _ZTIFN12_GLOBAL__N_11DEvE = internal constant
-// CHECK: _ZTSFvN12_GLOBAL__N_11DEE = internal constant
+// CHECK: _ZTSFN12_GLOBAL__N_11DEvE = internal constant
// CHECK: _ZTIFvN12_GLOBAL__N_11DEE = internal constant
+// CHECK: _ZTSFvN12_GLOBAL__N_11DEE = internal constant
+// CHECK: _ZTIPFvvE = linkonce_odr constant
// CHECK: _ZTSPFvvE = linkonce_odr constant
-// CHECK: _ZTSFvvE = linkonce_odr constant
// CHECK: _ZTIFvvE = linkonce_odr constant
-// CHECK: _ZTIPFvvE = linkonce_odr constant
-// CHECK: _ZTSN12_GLOBAL__N_11EE = internal constant
+// CHECK: _ZTSFvvE = linkonce_odr constant
// CHECK: _ZTIN12_GLOBAL__N_11EE = internal constant
-// CHECK: _ZTSA10_i = linkonce_odr constant
+// CHECK: _ZTSN12_GLOBAL__N_11EE = internal constant
// CHECK: _ZTIA10_i = linkonce_odr constant
+// CHECK: _ZTSA10_i = linkonce_odr constant
// CHECK: _ZTI1TILj0EE = linkonce_odr constant
// CHECK: _ZTI1TILj1EE = weak_odr constant
// CHECK: _ZTI1TILj2EE = external constant
-// CHECK: _ZTSZ2t5vE1A = internal constant
// CHECK: _ZTIZ2t5vE1A = internal constant
-// CHECK: _ZTS1B ={{.*}} constant
+// CHECK: _ZTSZ2t5vE1A = internal constant
// CHECK: _ZTI1B ={{.*}} constant
+// CHECK: _ZTS1B ={{.*}} constant
// CHECK: _ZTS1F = linkonce_odr constant
-// CHECK: _ZTSZ2t6vE1A = linkonce_odr constant
// CHECK: _ZTIZ2t6vE1A = linkonce_odr constant
+// CHECK: _ZTSZ2t6vE1A = linkonce_odr constant
+// CHECK: _ZTIPZ2t7vE1A = linkonce_odr constant
// CHECK: _ZTSPZ2t7vE1A = linkonce_odr constant
-// CHECK: _ZTSZ2t7vE1A = linkonce_odr constant
// CHECK: _ZTIZ2t7vE1A = linkonce_odr constant
-// CHECK: _ZTIPZ2t7vE1A = linkonce_odr constant
+// CHECK: _ZTSZ2t7vE1A = linkonce_odr constant
// CHECK: _ZTIN12_GLOBAL__N_11DE
diff --git a/clang/test/CodeGenCXX/rtti-visibility.cpp b/clang/test/CodeGenCXX/rtti-visibility.cpp
index 5945be5..1813fee 100644
--- a/clang/test/CodeGenCXX/rtti-visibility.cpp
+++ b/clang/test/CodeGenCXX/rtti-visibility.cpp
@@ -6,10 +6,10 @@
namespace Test1 {
// A is explicitly marked hidden, so all RTTI data should also be marked hidden.
- // CHECK-TEST1: @_ZTSN5Test11AE = linkonce_odr hidden constant
// CHECK-TEST1: @_ZTIN5Test11AE = linkonce_odr hidden constant
- // CHECK-TEST1: @_ZTSPN5Test11AE = linkonce_odr hidden constant
+ // CHECK-TEST1: @_ZTSN5Test11AE = linkonce_odr hidden constant
// CHECK-TEST1: @_ZTIPN5Test11AE = linkonce_odr hidden constant
+ // CHECK-TEST1: @_ZTSPN5Test11AE = linkonce_odr hidden constant
struct __attribute__((visibility("hidden"))) A { };
void f() {
@@ -20,8 +20,8 @@ namespace Test1 {
namespace Test2 {
// A is weak, so its linkage should be linkoce_odr, but not marked hidden.
- // CHECK-TEST2: @_ZTSN5Test21AE = linkonce_odr constant
// CHECK-TEST2: @_ZTIN5Test21AE = linkonce_odr constant
+ // CHECK-TEST2: @_ZTSN5Test21AE = linkonce_odr constant
struct A { };
void f() {
(void)typeid(A);
diff --git a/clang/test/CodeGenCXX/symbol-partition.cpp b/clang/test/CodeGenCXX/symbol-partition.cpp
index ecc58e2..cefeeac 100644
--- a/clang/test/CodeGenCXX/symbol-partition.cpp
+++ b/clang/test/CodeGenCXX/symbol-partition.cpp
@@ -2,8 +2,8 @@
// CHECK: @gv = {{.*}}, partition "foo"
// CHECK: @_ZTV1S = {{.*}}, partition "foo"
-// CHECK: @_ZTS1S = {{.*}}, partition "foo"
// CHECK: @_ZTI1S = {{.*}}, partition "foo"
+// CHECK: @_ZTS1S = {{.*}}, partition "foo"
// CHECK: @_Z5ifuncv = {{.*}}, partition "foo"
diff --git a/clang/test/CodeGenCXX/type_visibility.cpp b/clang/test/CodeGenCXX/type_visibility.cpp
index 13aafcf..00833e3 100644
--- a/clang/test/CodeGenCXX/type_visibility.cpp
+++ b/clang/test/CodeGenCXX/type_visibility.cpp
@@ -26,12 +26,12 @@ namespace temp0 {
template struct B<A>;
// FUNS-LABEL: define weak_odr void @_ZN5temp01BINS_1AEE3fooEv(
// VARS: @_ZTVN5temp01BINS_1AEEE = weak_odr unnamed_addr constant
- // VARS: @_ZTSN5temp01BINS_1AEEE = weak_odr constant
// VARS: @_ZTIN5temp01BINS_1AEEE = weak_odr constant
+ // VARS: @_ZTSN5temp01BINS_1AEEE = weak_odr constant
// FUNS-HIDDEN-LABEL: define weak_odr hidden void @_ZN5temp01BINS_1AEE3fooEv(
// VARS-HIDDEN: @_ZTVN5temp01BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5temp01BINS_1AEEE = weak_odr hidden constant
// VARS-HIDDEN: @_ZTIN5temp01BINS_1AEEE = weak_odr hidden constant
+ // VARS-HIDDEN: @_ZTSN5temp01BINS_1AEEE = weak_odr hidden constant
}
namespace temp1 {
@@ -43,12 +43,12 @@ namespace temp1 {
template struct B<A>;
// FUNS-LABEL: define weak_odr void @_ZN5temp11BINS_1AEE3fooEv(
// VARS: @_ZTVN5temp11BINS_1AEEE = weak_odr unnamed_addr constant
- // VARS: @_ZTSN5temp11BINS_1AEEE = weak_odr constant
// VARS: @_ZTIN5temp11BINS_1AEEE = weak_odr constant
+ // VARS: @_ZTSN5temp11BINS_1AEEE = weak_odr constant
// FUNS-HIDDEN-LABEL: define weak_odr hidden void @_ZN5temp11BINS_1AEE3fooEv(
// VARS-HIDDEN: @_ZTVN5temp11BINS_1AEEE = weak_odr unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5temp11BINS_1AEEE = weak_odr constant
// VARS-HIDDEN: @_ZTIN5temp11BINS_1AEEE = weak_odr constant
+ // VARS-HIDDEN: @_ZTSN5temp11BINS_1AEEE = weak_odr constant
}
namespace temp2 {
@@ -60,12 +60,12 @@ namespace temp2 {
template struct B<A>;
// FUNS-LABEL: define weak_odr void @_ZN5temp21BINS_1AEE3fooEv(
// VARS: @_ZTVN5temp21BINS_1AEEE = weak_odr unnamed_addr constant
- // VARS: @_ZTSN5temp21BINS_1AEEE = weak_odr constant
// VARS: @_ZTIN5temp21BINS_1AEEE = weak_odr constant
+ // VARS: @_ZTSN5temp21BINS_1AEEE = weak_odr constant
// FUNS-HIDDEN-LABEL: define weak_odr hidden void @_ZN5temp21BINS_1AEE3fooEv(
// VARS-HIDDEN: @_ZTVN5temp21BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5temp21BINS_1AEEE = weak_odr hidden constant
// VARS-HIDDEN: @_ZTIN5temp21BINS_1AEEE = weak_odr hidden constant
+ // VARS-HIDDEN: @_ZTSN5temp21BINS_1AEEE = weak_odr hidden constant
}
namespace temp3 {
@@ -77,12 +77,12 @@ namespace temp3 {
template struct B<A>;
// FUNS-LABEL: define weak_odr hidden void @_ZN5temp31BINS_1AEE3fooEv(
// VARS: @_ZTVN5temp31BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS: @_ZTSN5temp31BINS_1AEEE = weak_odr hidden constant
// VARS: @_ZTIN5temp31BINS_1AEEE = weak_odr hidden constant
+ // VARS: @_ZTSN5temp31BINS_1AEEE = weak_odr hidden constant
// FUNS-HIDDEN-LABEL: define weak_odr hidden void @_ZN5temp31BINS_1AEE3fooEv(
// VARS-HIDDEN: @_ZTVN5temp31BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5temp31BINS_1AEEE = weak_odr hidden constant
// VARS-HIDDEN: @_ZTIN5temp31BINS_1AEEE = weak_odr hidden constant
+ // VARS-HIDDEN: @_ZTSN5temp31BINS_1AEEE = weak_odr hidden constant
}
namespace temp4 {
@@ -94,12 +94,12 @@ namespace temp4 {
template struct B<A>;
// FUNS-LABEL: define weak_odr void @_ZN5temp41BINS_1AEE3fooEv(
// VARS: @_ZTVN5temp41BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS: @_ZTSN5temp41BINS_1AEEE = weak_odr hidden constant
// VARS: @_ZTIN5temp41BINS_1AEEE = weak_odr hidden constant
+ // VARS: @_ZTSN5temp41BINS_1AEEE = weak_odr hidden constant
// FUNS-HIDDEN-LABEL: define weak_odr hidden void @_ZN5temp41BINS_1AEE3fooEv(
// VARS-HIDDEN: @_ZTVN5temp41BINS_1AEEE = weak_odr hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5temp41BINS_1AEEE = weak_odr hidden constant
// VARS-HIDDEN: @_ZTIN5temp41BINS_1AEEE = weak_odr hidden constant
+ // VARS-HIDDEN: @_ZTSN5temp41BINS_1AEEE = weak_odr hidden constant
}
namespace type0 {
@@ -110,12 +110,12 @@ namespace type0 {
void A::foo() {}
// FUNS-LABEL: define void @_ZN5type01A3fooEv(
// VARS: @_ZTVN5type01AE = unnamed_addr constant
- // VARS: @_ZTSN5type01AE = constant
// VARS: @_ZTIN5type01AE = constant
+ // VARS: @_ZTSN5type01AE = constant
// FUNS-HIDDEN-LABEL: define hidden void @_ZN5type01A3fooEv(
// VARS-HIDDEN: @_ZTVN5type01AE = unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5type01AE = constant
// VARS-HIDDEN: @_ZTIN5type01AE = constant
+ // VARS-HIDDEN: @_ZTSN5type01AE = constant
}
namespace type1 {
@@ -126,12 +126,12 @@ namespace type1 {
void A::foo() {}
// FUNS-LABEL: define hidden void @_ZN5type11A3fooEv(
// VARS: @_ZTVN5type11AE = unnamed_addr constant
- // VARS: @_ZTSN5type11AE = constant
// VARS: @_ZTIN5type11AE = constant
+ // VARS: @_ZTSN5type11AE = constant
// FUNS-HIDDEN-LABEL: define hidden void @_ZN5type11A3fooEv(
// VARS-HIDDEN: @_ZTVN5type11AE = unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5type11AE = constant
// VARS-HIDDEN: @_ZTIN5type11AE = constant
+ // VARS-HIDDEN: @_ZTSN5type11AE = constant
}
namespace type2 {
@@ -142,12 +142,12 @@ namespace type2 {
void A::foo() {}
// FUNS-LABEL: define void @_ZN5type21A3fooEv(
// VARS: @_ZTVN5type21AE = hidden unnamed_addr constant
- // VARS: @_ZTSN5type21AE = hidden constant
// VARS: @_ZTIN5type21AE = hidden constant
+ // VARS: @_ZTSN5type21AE = hidden constant
// FUNS-HIDDEN-LABEL: define hidden void @_ZN5type21A3fooEv(
// VARS-HIDDEN: @_ZTVN5type21AE = hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5type21AE = hidden constant
// VARS-HIDDEN: @_ZTIN5type21AE = hidden constant
+ // VARS-HIDDEN: @_ZTSN5type21AE = hidden constant
}
namespace type3 {
@@ -158,11 +158,11 @@ namespace type3 {
void A::foo() {}
// FUNS-LABEL: define void @_ZN5type31A3fooEv(
// VARS: @_ZTVN5type31AE = hidden unnamed_addr constant
- // VARS: @_ZTSN5type31AE = hidden constant
// VARS: @_ZTIN5type31AE = hidden constant
+ // VARS: @_ZTSN5type31AE = hidden constant
// FUNS-HIDDEN-LABEL: define void @_ZN5type31A3fooEv(
// VARS-HIDDEN: @_ZTVN5type31AE = hidden unnamed_addr constant
- // VARS-HIDDEN: @_ZTSN5type31AE = hidden constant
// VARS-HIDDEN: @_ZTIN5type31AE = hidden constant
+ // VARS-HIDDEN: @_ZTSN5type31AE = hidden constant
}
diff --git a/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp b/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
index 60eb8f1..68eb5cb 100644
--- a/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
+++ b/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
@@ -15,12 +15,12 @@ class B : A {
// NO-AS: @_ZTISt9type_info = external constant ptr
// AS: @_ZTIi = external addrspace(1) constant ptr addrspace(1)
// NO-AS: @_ZTIi = external constant ptr
+// AS: @_ZTI1A = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1A }, comdat, align 8
+// NO-AS: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1A }, comdat, align 8
// AS: @_ZTVN10__cxxabiv117__class_type_infoE = external addrspace(1) global [0 x ptr addrspace(1)]
// NO-AS: @_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
// AS: @_ZTS1A = linkonce_odr addrspace(1) constant [3 x i8] c"1A\00", comdat, align 1
// NO-AS: @_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00", comdat, align 1
-// AS: @_ZTI1A = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1A }, comdat, align 8
-// NO-AS: @_ZTI1A = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1A }, comdat, align 8
// AS: @_ZTIf = external addrspace(1) constant ptr addrspace(1)
// NO-AS: @_ZTIf = external constant ptr
diff --git a/clang/test/CodeGenCXX/visibility-ms-compat.cpp b/clang/test/CodeGenCXX/visibility-ms-compat.cpp
index 5256913..0344803 100644
--- a/clang/test/CodeGenCXX/visibility-ms-compat.cpp
+++ b/clang/test/CodeGenCXX/visibility-ms-compat.cpp
@@ -24,8 +24,8 @@ namespace test0 {
// CHECK: declare void @_ZN5test01A3barEv()
const std::type_info &ti = typeid(A);
- // CHECK-GLOBAL: @_ZTSN5test01AE = linkonce_odr constant
// CHECK-GLOBAL: @_ZTIN5test01AE = linkonce_odr constant
+ // CHECK-GLOBAL: @_ZTSN5test01AE = linkonce_odr constant
// CHECK-GLOBAL: @_ZN5test02tiE = hidden constant
}
@@ -40,8 +40,8 @@ namespace test1 {
// CHECK: declare hidden void @_ZN5test11A3barEv()
const std::type_info &ti = typeid(A);
- // CHECK-GLOBAL: @_ZTSN5test11AE = linkonce_odr hidden constant
// CHECK-GLOBAL: @_ZTIN5test11AE = linkonce_odr hidden constant
+ // CHECK-GLOBAL: @_ZTSN5test11AE = linkonce_odr hidden constant
// CHECK-GLOBAL: @_ZN5test12tiE = hidden constant
}
@@ -56,8 +56,8 @@ namespace test2 {
// CHECK: declare void @_ZN5test21A3barEv()
const std::type_info &ti = typeid(A);
- // CHECK-GLOBAL: @_ZTSN5test21AE = linkonce_odr constant
// CHECK-GLOBAL: @_ZTIN5test21AE = linkonce_odr constant
+ // CHECK-GLOBAL: @_ZTSN5test21AE = linkonce_odr constant
// CHECK-GLOBAL: @_ZN5test22tiE = hidden constant
}
@@ -73,8 +73,8 @@ namespace test3 {
// CHECK: declare void @_ZN5test31BINS_1AEE3barEv()
const std::type_info &ti = typeid(B<A>);
- // CHECK-GLOBAL: @_ZTSN5test31BINS_1AEEE = linkonce_odr constant
// CHECK-GLOBAL: @_ZTIN5test31BINS_1AEEE = linkonce_odr constant
+ // CHECK-GLOBAL: @_ZTSN5test31BINS_1AEEE = linkonce_odr constant
}
namespace test4 {
@@ -89,8 +89,8 @@ namespace test4 {
// CHECK: declare void @_ZN5test41BINS_1AEE3barEv()
const std::type_info &ti = typeid(B<A>);
- // CHECK-GLOBAL: @_ZTSN5test41BINS_1AEEE = linkonce_odr constant
// CHECK-GLOBAL: @_ZTIN5test41BINS_1AEEE = linkonce_odr constant
+ // CHECK-GLOBAL: @_ZTSN5test41BINS_1AEEE = linkonce_odr constant
}
namespace test5 {
@@ -105,6 +105,6 @@ namespace test5 {
// CHECK: declare hidden void @_ZN5test51BINS_1AEE3barEv()
const std::type_info &ti = typeid(B<A>);
- // CHECK-GLOBAL: @_ZTSN5test51BINS_1AEEE = linkonce_odr hidden constant
// CHECK-GLOBAL: @_ZTIN5test51BINS_1AEEE = linkonce_odr hidden constant
+ // CHECK-GLOBAL: @_ZTSN5test51BINS_1AEEE = linkonce_odr hidden constant
}
diff --git a/clang/test/CodeGenCXX/vtable-align-address-space.cpp b/clang/test/CodeGenCXX/vtable-align-address-space.cpp
index 5eac0bd..5eccf0a 100644
--- a/clang/test/CodeGenCXX/vtable-align-address-space.cpp
+++ b/clang/test/CodeGenCXX/vtable-align-address-space.cpp
@@ -9,5 +9,5 @@ struct A {
void A::f() {}
// CHECK: @_ZTV1A ={{.*}} unnamed_addr addrspace(1) constant { [5 x ptr addrspace(1)] } { [5 x ptr addrspace(1)] [ptr addrspace(1) null, ptr addrspace(1) @_ZTI1A, ptr addrspace(1) addrspacecast (ptr @_ZN1A1fEv to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr @_ZN1A1gEv to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr @_ZN1A1hEv to ptr addrspace(1))]
-// CHECK: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
// CHECK: @_ZTI1A ={{.*}} addrspace(1) constant { ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1A }, align 8
+// CHECK: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
diff --git a/clang/test/CodeGenCXX/vtable-align.cpp b/clang/test/CodeGenCXX/vtable-align.cpp
index fb8ff1a..f1d5e09 100644
--- a/clang/test/CodeGenCXX/vtable-align.cpp
+++ b/clang/test/CodeGenCXX/vtable-align.cpp
@@ -10,8 +10,8 @@ struct A {
void A::f() {}
// CHECK-32: @_ZTV1A ={{.*}} unnamed_addr constant { [5 x ptr] } { [5 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A1fEv, ptr @_ZN1A1gEv, ptr @_ZN1A1hEv] }, align 4
-// CHECK-32: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
// CHECK-32: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS1A }, align 4
+// CHECK-32: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
// CHECK-64: @_ZTV1A ={{.*}} unnamed_addr constant { [5 x ptr] } { [5 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A1fEv, ptr @_ZN1A1gEv, ptr @_ZN1A1hEv] }, align 8
-// CHECK-64: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
// CHECK-64: @_ZTI1A ={{.*}} constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1A }, align 8
+// CHECK-64: @_ZTS1A ={{.*}} constant [3 x i8] c"1A\00", align 1
diff --git a/clang/test/CodeGenCXX/vtable-available-externally.cpp b/clang/test/CodeGenCXX/vtable-available-externally.cpp
index ab10526..4415e24 100644
--- a/clang/test/CodeGenCXX/vtable-available-externally.cpp
+++ b/clang/test/CodeGenCXX/vtable-available-externally.cpp
@@ -49,8 +49,8 @@ void g() {
// This tests mainly that the typeinfo and typename constants have their linkage
// updated correctly.
-// CHECK-TEST2: @_ZTSN5Test21AE ={{.*}} constant
// CHECK-TEST2: @_ZTIN5Test21AE ={{.*}} constant
+// CHECK-TEST2: @_ZTSN5Test21AE ={{.*}} constant
// CHECK-TEST2: @_ZTVN5Test21AE ={{.*}} unnamed_addr constant
namespace Test2 {
struct A {
diff --git a/clang/test/CodeGenCXX/vtable-key-function-arm.cpp b/clang/test/CodeGenCXX/vtable-key-function-arm.cpp
index a054fd8..83889bf 100644
--- a/clang/test/CodeGenCXX/vtable-key-function-arm.cpp
+++ b/clang/test/CodeGenCXX/vtable-key-function-arm.cpp
@@ -90,8 +90,8 @@ struct Test2a {
// V-table should be defined with strong linkage.
Test2a::Test2a() { use(typeid(Test2a)); }
// CHECK: @_ZTV6Test2a ={{.*}} unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test2a ={{.*}} constant
// CHECK-LATE: @_ZTI6Test2a ={{.*}} constant
+// CHECK-LATE: @_ZTS6Test2a ={{.*}} constant
// 'bar' becomes the key function when 'foo' is defined inline.
void Test2a::bar() {}
@@ -111,8 +111,8 @@ void Test2b::bar() {}
// V-table should be defined with strong linkage.
Test2b::Test2b() { use(typeid(Test2b)); }
// CHECK: @_ZTV6Test2b ={{.*}} unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test2b ={{.*}} constant
// CHECK-LATE: @_ZTI6Test2b ={{.*}} constant
+// CHECK-LATE: @_ZTS6Test2b ={{.*}} constant
inline void Test2b::foo() {}
@@ -131,8 +131,8 @@ inline void Test2c::foo() {}
// V-table should be defined with strong linkage.
Test2c::Test2c() { use(typeid(Test2c)); }
// CHECK: @_ZTV6Test2c ={{.*}} unnamed_addr constant
-// CHECK: @_ZTS6Test2c ={{.*}} constant
// CHECK: @_ZTI6Test2c ={{.*}} constant
+// CHECK: @_ZTS6Test2c ={{.*}} constant
/*** Test3a ******************************************************************/
@@ -145,8 +145,8 @@ struct Test3a {
// V-table should be defined with weak linkage.
Test3a::Test3a() { use(typeid(Test3a)); }
// CHECK: @_ZTV6Test3a = linkonce_odr unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test3a = linkonce_odr constant
// CHECK-LATE: @_ZTI6Test3a = linkonce_odr constant
+// CHECK-LATE: @_ZTS6Test3a = linkonce_odr constant
// There ceases to be a key function after these declarations.
inline void Test3a::bar() {}
@@ -166,8 +166,8 @@ inline void Test3b::bar() {}
// V-table should be defined with weak linkage.
Test3b::Test3b() { use(typeid(Test3b)); }
// CHECK: @_ZTV6Test3b = linkonce_odr unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test3b = linkonce_odr constant
// CHECK-LATE: @_ZTI6Test3b = linkonce_odr constant
+// CHECK-LATE: @_ZTS6Test3b = linkonce_odr constant
inline void Test3b::foo() {}
@@ -186,8 +186,8 @@ inline void Test3c::foo() {}
// V-table should be defined with weak linkage.
Test3c::Test3c() { use(typeid(Test3c)); }
// CHECK: @_ZTV6Test3c = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test3c = linkonce_odr constant
// CHECK: @_ZTI6Test3c = linkonce_odr constant
+// CHECK: @_ZTS6Test3c = linkonce_odr constant
/*** Test4a ******************************************************************/
@@ -200,8 +200,8 @@ template <class T> struct Test4a {
// V-table should be defined with weak linkage.
template <> Test4a<int>::Test4a() { use(typeid(Test4a)); }
// CHECK: @_ZTV6Test4aIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test4aIiE = linkonce_odr constant
// CHECK: @_ZTI6Test4aIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test4aIiE = linkonce_odr constant
// There ceases to be a key function after these declarations.
template <> inline void Test4a<int>::bar() {}
@@ -221,8 +221,8 @@ template <> inline void Test4b<int>::bar() {}
// V-table should be defined with weak linkage.
template <> Test4b<int>::Test4b() { use(typeid(Test4b)); }
// CHECK: @_ZTV6Test4bIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test4bIiE = linkonce_odr constant
// CHECK: @_ZTI6Test4bIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test4bIiE = linkonce_odr constant
template <> inline void Test4b<int>::foo() {}
@@ -241,8 +241,8 @@ template <> inline void Test4c<int>::foo() {}
// V-table should be defined with weak linkage.
template <> Test4c<int>::Test4c() { use(typeid(Test4c)); }
// CHECK: @_ZTV6Test4cIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test4cIiE = linkonce_odr constant
// CHECK: @_ZTI6Test4cIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test4cIiE = linkonce_odr constant
/*** Test5a ******************************************************************/
@@ -258,8 +258,8 @@ template <> inline void Test5a<int>::foo();
// V-table should be defined with weak linkage.
template <> Test5a<int>::Test5a() { use(typeid(Test5a)); }
// CHECK: @_ZTV6Test5aIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test5aIiE = linkonce_odr constant
// CHECK: @_ZTI6Test5aIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test5aIiE = linkonce_odr constant
// There ceases to be a key function after these declarations.
template <> inline void Test5a<int>::bar() {}
@@ -280,8 +280,8 @@ template <> inline void Test5b<int>::bar() {}
// V-table should be defined with weak linkage.
template <> Test5b<int>::Test5b() { use(typeid(Test5b)); }
// CHECK: @_ZTV6Test5bIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test5bIiE = linkonce_odr constant
// CHECK: @_ZTI6Test5bIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test5bIiE = linkonce_odr constant
template <> inline void Test5a<int>::foo();
template <> inline void Test5b<int>::foo() {}
@@ -303,5 +303,5 @@ template <> inline void Test5c<int>::foo() {}
// V-table should be defined with weak linkage.
template <> Test5c<int>::Test5c() { use(typeid(Test5c)); }
// CHECK: @_ZTV6Test5cIiE = linkonce_odr unnamed_addr constant
-// CHECK: @_ZTS6Test5cIiE = linkonce_odr constant
// CHECK: @_ZTI6Test5cIiE = linkonce_odr constant
+// CHECK: @_ZTS6Test5cIiE = linkonce_odr constant
diff --git a/clang/test/CodeGenCXX/vtable-key-function-ios.cpp b/clang/test/CodeGenCXX/vtable-key-function-ios.cpp
index ff2793a..43abfb6 100644
--- a/clang/test/CodeGenCXX/vtable-key-function-ios.cpp
+++ b/clang/test/CodeGenCXX/vtable-key-function-ios.cpp
@@ -63,8 +63,8 @@ struct Test1a {
// V-table needs to be defined weakly.
Test1a::Test1a() { use(typeid(Test1a)); }
// CHECK: @_ZTV6Test1a = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test1a = linkonce_odr {{(dso_local )?}}constant
// CHECK-LATE: @_ZTI6Test1a = linkonce_odr {{(dso_local )?}}constant
+// CHECK-LATE: @_ZTS6Test1a = linkonce_odr {{(dso_local )?}}constant
// This defines the key function.
inline void Test1a::foo() {}
@@ -83,8 +83,8 @@ inline void Test1b::foo() {}
// V-table should be defined weakly..
Test1b::Test1b() { use(typeid(Test1b)); }
// CHECK: @_ZTV6Test1b = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK: @_ZTS6Test1b = linkonce_odr {{(dso_local )?}}constant
// CHECK: @_ZTI6Test1b = linkonce_odr {{(dso_local )?}}constant
+// CHECK: @_ZTS6Test1b = linkonce_odr {{(dso_local )?}}constant
/*** Test2a ******************************************************************/
@@ -97,8 +97,8 @@ struct Test2a {
// V-table should be defined with weak linkage.
Test2a::Test2a() { use(typeid(Test2a)); }
// CHECK: @_ZTV6Test2a = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test2a = linkonce_odr {{(dso_local )?}}constant
// CHECK-LATE: @_ZTI6Test2a = linkonce_odr {{(dso_local )?}}constant
+// CHECK-LATE: @_ZTS6Test2a = linkonce_odr {{(dso_local )?}}constant
void Test2a::bar() {}
inline void Test2a::foo() {}
@@ -116,8 +116,8 @@ void Test2b::bar() {}
// V-table should be defined with weak linkage.
Test2b::Test2b() { use(typeid(Test2b)); }
// CHECK: @_ZTV6Test2b = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test2b = linkonce_odr {{(dso_local )?}}constant
// CHECK-LATE: @_ZTI6Test2b = linkonce_odr {{(dso_local )?}}constant
+// CHECK-LATE: @_ZTS6Test2b = linkonce_odr {{(dso_local )?}}constant
inline void Test2b::foo() {}
@@ -135,8 +135,8 @@ inline void Test2c::foo() {}
// V-table should be defined with weak linkage.
Test2c::Test2c() { use(typeid(Test2c)); }
// CHECK: @_ZTV6Test2c = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK: @_ZTS6Test2c = linkonce_odr {{(dso_local )?}}constant
// CHECK: @_ZTI6Test2c = linkonce_odr {{(dso_local )?}}constant
+// CHECK: @_ZTS6Test2c = linkonce_odr {{(dso_local )?}}constant
/*** Test3a ******************************************************************/
@@ -149,8 +149,8 @@ struct Test3a {
// V-table should be defined with weak linkage.
Test3a::Test3a() { use(typeid(Test3a)); }
// CHECK: @_ZTV6Test3a = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test3a = linkonce_odr {{(dso_local )?}}constant
// CHECK-LATE: @_ZTI6Test3a = linkonce_odr {{(dso_local )?}}constant
+// CHECK-LATE: @_ZTS6Test3a = linkonce_odr {{(dso_local )?}}constant
// This defines the key function.
inline void Test3a::bar() {}
@@ -169,8 +169,8 @@ inline void Test3b::bar() {}
// V-table should be defined with weak linkage.
Test3b::Test3b() { use(typeid(Test3b)); }
// CHECK: @_ZTV6Test3b = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK-LATE: @_ZTS6Test3b = linkonce_odr {{(dso_local )?}}constant
// CHECK-LATE: @_ZTI6Test3b = linkonce_odr {{(dso_local )?}}constant
+// CHECK-LATE: @_ZTS6Test3b = linkonce_odr {{(dso_local )?}}constant
// This defines the key function.
inline void Test3b::foo() {}
@@ -190,5 +190,5 @@ inline void Test3c::foo() {}
// V-table should be defined with weak linkage.
Test3c::Test3c() { use(typeid(Test3c)); }
// CHECK: @_ZTV6Test3c = linkonce_odr {{(dso_local )?}}unnamed_addr constant
-// CHECK: @_ZTS6Test3c = linkonce_odr {{(dso_local )?}}constant
// CHECK: @_ZTI6Test3c = linkonce_odr {{(dso_local )?}}constant
+// CHECK: @_ZTS6Test3c = linkonce_odr {{(dso_local )?}}constant
diff --git a/clang/test/CodeGenCXX/vtable-key-function-win-comdat.cpp b/clang/test/CodeGenCXX/vtable-key-function-win-comdat.cpp
index dd4fd9f..b3de2f6 100644
--- a/clang/test/CodeGenCXX/vtable-key-function-win-comdat.cpp
+++ b/clang/test/CodeGenCXX/vtable-key-function-win-comdat.cpp
@@ -15,11 +15,11 @@ Test1a::Test1a() { use(typeid(Test1a)); }
inline void Test1a::foo() {}
// CHECK: $_ZTV6Test1a = comdat any
-// CHECK: $_ZTS6Test1a = comdat any
// CHECK: $_ZTI6Test1a = comdat any
-// CHECK-NOT: $_ZTS6Test1a.1 = comdat any
+// CHECK: $_ZTS6Test1a = comdat any
// CHECK-NOT: $_ZTI6Test1a.1 = comdat any
+// CHECK-NOT: $_ZTS6Test1a.1 = comdat any
// CHECK: @_ZTV6Test1a = linkonce_odr dso_local unnamed_addr constant {{.*}} ptr @_ZTI6Test1a
-// CHECK: @_ZTS6Test1a = linkonce_odr dso_local constant
// CHECK: @_ZTI6Test1a = linkonce_odr dso_local constant {{.*}} ptr @_ZTS6Test1a
+// CHECK: @_ZTS6Test1a = linkonce_odr dso_local constant
diff --git a/clang/test/CodeGenCXX/weak-extern-typeinfo.cpp b/clang/test/CodeGenCXX/weak-extern-typeinfo.cpp
index 932d36f..8c948d1 100644
--- a/clang/test/CodeGenCXX/weak-extern-typeinfo.cpp
+++ b/clang/test/CodeGenCXX/weak-extern-typeinfo.cpp
@@ -30,17 +30,17 @@ class V2 : public virtual V1 {
void V1::foo() { }
void V2::foo() { }
-// CHECK: @_ZTS1A = weak_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI1A = weak_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTS1B = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS1A = weak_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI1B = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS1B = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTI1C = weak_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTS1C = weak_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTS2T1 = linkonce_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI2T1 = linkonce_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTS1T = linkonce_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS2T1 = linkonce_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI1T = linkonce_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTI1C = weak_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTS2V1 = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS1T = linkonce_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI2V1 = weak_odr {{(dso_local |hidden )?}}constant
-// CHECK: @_ZTS2V2 = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS2V1 = weak_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI2V2 = weak_odr {{(dso_local |hidden )?}}constant
+// CHECK: @_ZTS2V2 = weak_odr {{(dso_local |hidden )?}}constant
diff --git a/clang/test/CodeGenCXX/windows-itanium-type-info.cpp b/clang/test/CodeGenCXX/windows-itanium-type-info.cpp
index 20bd78d..95b7b3a 100644
--- a/clang/test/CodeGenCXX/windows-itanium-type-info.cpp
+++ b/clang/test/CodeGenCXX/windows-itanium-type-info.cpp
@@ -33,8 +33,8 @@ void f() {
// CHECK-DAG: @_ZTI4base = external dllimport constant
-// CHECK-EH-IMPORT: @_ZTS4base = linkonce_odr dso_local constant
// CHECK-EH-IMPORT: @_ZTI4base = linkonce_odr dso_local constant
+// CHECK-EH-IMPORT: @_ZTS4base = linkonce_odr dso_local constant
struct __declspec(dllimport) gatekeeper {};
struct zuul : gatekeeper {
diff --git a/clang/test/CodeGenObjCXX/rtti.mm b/clang/test/CodeGenObjCXX/rtti.mm
index ee3df34..2fc6f87 100644
--- a/clang/test/CodeGenObjCXX/rtti.mm
+++ b/clang/test/CodeGenObjCXX/rtti.mm
@@ -4,19 +4,20 @@
namespace std { class type_info; }
-// CHECK: @_ZTI1A = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS1A
@interface A
@end
-// CHECK: @_ZTI1B = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv120__si_class_type_infoE{{.*}}@_ZTS1B{{.*}}@_ZTI1A
@interface B : A
@end
// CHECK: @_ZTIP1B = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv119__pointer_type_infoE{{.*}}@_ZTSP1B{{.*}}, i32 0, {{.*}}@_ZTI1B
-// CHECK: @_ZTI11objc_object = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS11objc_object
+// CHECK: @_ZTI1B = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv120__si_class_type_infoE{{.*}}@_ZTS1B{{.*}}@_ZTI1A
+// CHECK: @_ZTI1A = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS1A
+
// CHECK: @_ZTIP11objc_object = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv119__pointer_type_infoE{{.*}}@_ZTSP11objc_object{{.*}}@_ZTI11objc_object
-// CHECK: @_ZTI10objc_class = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS10objc_class
+// CHECK: @_ZTI11objc_object = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS11objc_object
// CHECK: @_ZTIP10objc_class = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv119__pointer_type_infoE{{.*}}@_ZTSP10objc_class{{.*}}@_ZTI10objc_class
+// CHECK: @_ZTI10objc_class = linkonce_odr constant {{.*}}@_ZTVN10__cxxabiv117__class_type_infoE{{.*}}@_ZTS10objc_class
@protocol P;
diff --git a/clang/test/Driver/stack-protector-guard.c b/clang/test/Driver/stack-protector-guard.c
index d8475a7..666c830 100644
--- a/clang/test/Driver/stack-protector-guard.c
+++ b/clang/test/Driver/stack-protector-guard.c
@@ -17,15 +17,15 @@
// RUN: FileCheck -check-prefix=CHECK-SYM %s
// Invalid arch
-// RUN: not %clang -target powerpc64le-linux-gnu -mstack-protector-guard=tls %s 2>&1 | \
+// RUN: not %clang -target mipsel-linux-gnu -mstack-protector-guard=tls %s 2>&1 | \
// RUN: FileCheck -check-prefix=INVALID-ARCH %s
// INVALID-ARCH: unsupported option '-mstack-protector-guard=tls' for target
-// RUN: not %clang -target powerpc64le-linux-gnu -mstack-protector-guard-reg=fs %s 2>&1 | \
+// RUN: not %clang -target mipsel-linux-gnu -mstack-protector-guard-reg=fs %s 2>&1 | \
// RUN: FileCheck -check-prefix=INVALID-ARCH2 %s
// INVALID-ARCH2: unsupported option '-mstack-protector-guard-reg=fs' for target
-// RUN: not %clang -target powerpc64le-linux-gnu -mstack-protector-guard-offset=10 %s 2>&1 | \
+// RUN: not %clang -target mipsel-linux-gnu -mstack-protector-guard-offset=10 %s 2>&1 | \
// RUN: FileCheck -check-prefix=INVALID-ARCH3 %s
// INVALID-ARCH3: unsupported option '-mstack-protector-guard-offset=10' for target
@@ -104,3 +104,54 @@
// RUN: FileCheck -check-prefix=INVALID-REG-RISCV %s
// INVALID-REG-RISCV: error: invalid value 'sp' in 'mstack-protector-guard-reg=', expected one of: tp
+
+// RUN: %clang -### -target powerpc64-unknown-elf -mstack-protector-guard=tls -mstack-protector-guard-offset=24 -mstack-protector-guard-reg=r13 %s 2>&1 | \
+// RUN: FileCheck -v -check-prefix=CHECK-TLS-POWERPC64 %s
+// RUN: %clang -### -target powerpc64-unknown-linux-gnu -mstack-protector-guard=global %s 2>&1 | \
+// RUN: FileCheck -check-prefix=CHECK-GLOBAL %s
+
+// RUN: not %clang -target powerpc64-unknown-linux-gnu -mstack-protector-guard=tls %s 2>&1 | \
+// RUN: FileCheck -check-prefix=MISSING-OFFSET %s
+
+// RUN: not %clang -target powerpc64-unknown-elf -mstack-protector-guard=sysreg %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-VALUE2 %s
+
+// RUN: not %clang -target powerpc64-unknown-elf -mstack-protector-guard=tls \
+// RUN: -mstack-protector-guard-offset=20 -mstack-protector-guard-reg=r12 %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-REG-POWERPC64 %s
+
+// CHECK-TLS-POWERPC64: "-cc1" {{.*}}"-mstack-protector-guard=tls" "-mstack-protector-guard-offset=24" "-mstack-protector-guard-reg=r13"
+// INVALID-REG-POWERPC64: error: invalid value 'r12' in 'mstack-protector-guard-reg=', expected one of: r13
+
+// RUN: %clang -### -target powerpc64le-unknown-elf -mstack-protector-guard=tls -mstack-protector-guard-offset=24 -mstack-protector-guard-reg=r13 %s 2>&1 | \
+// RUN: FileCheck -v -check-prefix=CHECK-TLS-POWERPC64 %s
+// RUN: %clang -### -target powerpc64le-unknown-elf -mstack-protector-guard=global %s 2>&1 | \
+// RUN: FileCheck -check-prefix=CHECK-GLOBAL %s
+
+// RUN: not %clang -target powerpc64le-unknown-elf -mstack-protector-guard=tls %s 2>&1 | \
+// RUN: FileCheck -check-prefix=MISSING-OFFSET %s
+
+// RUN: not %clang -target powerpc64le-unknown-elf -mstack-protector-guard=sysreg %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-VALUE2 %s
+
+// RUN: not %clang -target powerpc64le-unknown-elf -mstack-protector-guard=tls \
+// RUN: -mstack-protector-guard-offset=20 -mstack-protector-guard-reg=r12 %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-REG-POWERPC64 %s
+
+// RUN: %clang -### -target ppc32-unknown-elf -mstack-protector-guard=tls -mstack-protector-guard-offset=24 -mstack-protector-guard-reg=r2 %s 2>&1 | \
+// RUN: FileCheck -v -check-prefix=CHECK-TLS-POWERPC32 %s
+// RUN: %clang -### -target ppc32-unknown-elf -mstack-protector-guard=global %s 2>&1 | \
+// RUN: FileCheck -check-prefix=CHECK-GLOBAL %s
+
+// RUN: not %clang -target ppc32-unknown-elf -mstack-protector-guard=tls %s 2>&1 | \
+// RUN: FileCheck -check-prefix=MISSING-OFFSET %s
+
+// RUN: not %clang -target ppc32-unknown-elf -mstack-protector-guard=sysreg %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-VALUE2 %s
+
+// RUN: not %clang -target ppc32-unknown-elf -mstack-protector-guard=tls \
+// RUN: -mstack-protector-guard-offset=20 -mstack-protector-guard-reg=r3 %s 2>&1 | \
+// RUN: FileCheck -check-prefix=INVALID-REG-POWERPC32 %s
+
+// CHECK-TLS-POWERPC32: "-cc1" {{.*}}"-mstack-protector-guard=tls" "-mstack-protector-guard-offset=24" "-mstack-protector-guard-reg=r2"
+// INVALID-REG-POWERPC32: error: invalid value 'r3' in 'mstack-protector-guard-reg=', expected one of: r2
diff --git a/clang/test/Modules/no-external-type-id.cppm b/clang/test/Modules/no-external-type-id.cppm
index a4ca389..577b97f 100644
--- a/clang/test/Modules/no-external-type-id.cppm
+++ b/clang/test/Modules/no-external-type-id.cppm
@@ -23,7 +23,7 @@ export module b;
import a;
export int b();
-// CHECK: <DECL_FUNCTION {{.*}} op8=4064
+// CHECK: <DECL_FUNCTION {{.*}} op8=4088
// CHECK: <TYPE_FUNCTION_PROTO
//--- a.v1.cppm
diff --git a/clang/test/Modules/pr97313.cppm b/clang/test/Modules/pr97313.cppm
index ebbd0ee..32c7112 100644
--- a/clang/test/Modules/pr97313.cppm
+++ b/clang/test/Modules/pr97313.cppm
@@ -107,12 +107,12 @@ auto v6 = new Template<NonTemplate>();
// CHECK: @_ZTVW3Mod11NonTemplate = {{.*}}external
// CHECK: @_ZTVW3Mod8TemplateIcE = {{.*}}external
// CHECK: @_ZTVW3Mod8TemplateIjE = {{.*}}weak_odr
-// CHECK: @_ZTSW3Mod8TemplateIjE = {{.*}}weak_odr
// CHECK: @_ZTIW3Mod8TemplateIjE = {{.*}}weak_odr
+// CHECK: @_ZTSW3Mod8TemplateIjE = {{.*}}weak_odr
// CHECK: @_ZTVW3Mod8TemplateIdE = {{.*}}external
// CHECK: @_ZTVW3Mod8TemplateIiE = {{.*}}linkonce_odr
-// CHECK: @_ZTSW3Mod8TemplateIiE = {{.*}}linkonce_odr
// CHECK: @_ZTIW3Mod8TemplateIiE = {{.*}}linkonce_odr
+// CHECK: @_ZTSW3Mod8TemplateIiE = {{.*}}linkonce_odr
// CHECK: @_ZTVW3Mod8TemplateIS_11NonTemplateE = {{.*}}linkonce_odr
-// CHECK: @_ZTSW3Mod8TemplateIS_11NonTemplateE = {{.*}}linkonce_odr
// CHECK: @_ZTIW3Mod8TemplateIS_11NonTemplateE = {{.*}}linkonce_odr
+// CHECK: @_ZTSW3Mod8TemplateIS_11NonTemplateE = {{.*}}linkonce_odr
diff --git a/clang/test/Sema/constant-builtins-2.c b/clang/test/Sema/constant-builtins-2.c
index da22645..e465a3c5f 100644
--- a/clang/test/Sema/constant-builtins-2.c
+++ b/clang/test/Sema/constant-builtins-2.c
@@ -35,7 +35,7 @@ long double g11 = __builtin_nansl("");
__float128 g11_2 = __builtin_nansf128("");
#endif
-//int g12 = __builtin_abs(-12);
+int g12 = __builtin_abs(-12);
double g13 = __builtin_fabs(-12.);
double g13_0 = __builtin_fabs(-0.);
@@ -456,6 +456,17 @@ char clrsb9[__builtin_clrsb(1 << (BITSIZE(int) - 1)) == 0 ? 1 : -1];
char clrsb10[__builtin_clrsb(~(1 << (BITSIZE(int) - 1))) == 0 ? 1 : -1];
char clrsb11[__builtin_clrsb(0xf) == BITSIZE(int) - 5 ? 1 : -1];
char clrsb12[__builtin_clrsb(~0x1f) == BITSIZE(int) - 6 ? 1 : -1];
+
+char abs1[__builtin_abs(-12)];
+char abs2[__builtin_labs(-12L)];
+char abs3[__builtin_llabs(-12LL)];
+int abs4 = __builtin_abs(1 << (BITSIZE(int) - 1)); // expected-error {{not a compile-time constant}}
+char abs5[__builtin_abs((1 << (BITSIZE(int) - 1)) + 1)];
+long abs6 = __builtin_labs(1L << (BITSIZE(long) - 1)); // expected-error {{not a compile-time constant}}
+long abs7 = __builtin_labs((1L << (BITSIZE(long) - 1)) + 1);
+long long abs8 = __builtin_llabs(1LL << (BITSIZE(long long) - 1)); // expected-error {{not a compile-time constant}}
+long long abs9 = __builtin_llabs((1LL << (BITSIZE(long long) - 1)) + 1);
+
#undef BITSIZE
// GCC misc stuff
diff --git a/clang/test/Sema/constexpr.c b/clang/test/Sema/constexpr.c
index eaa000b3..3dcb0b3 100644
--- a/clang/test/Sema/constexpr.c
+++ b/clang/test/Sema/constexpr.c
@@ -374,3 +374,20 @@ void constexprif() {
void constevalif() {
if consteval (300) {} //expected-error {{expected '(' after 'if'}}
}
+
+struct S11 {
+ int len;
+};
+void ghissue112516() {
+ struct S11 *s11 = 0;
+ constexpr int num = s11->len; // expected-error {{constexpr variable 'num' must be initialized by a constant expression}}
+ void *Arr[num];
+}
+
+void ghissue109095() {
+ constexpr char c[] = { 'a' };
+ constexpr int i = c[1]; // expected-error {{constexpr variable 'i' must be initialized by a constant expression}}\
+ // expected-note {{declared here}}
+ _Static_assert(i == c[0]); // expected-error {{static assertion expression is not an integral constant expression}}\
+ // expected-note {{initializer of 'i' is not a constant expression}}
+}
diff --git a/clang/test/SemaCXX/cxx2b-deducing-this.cpp b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
index 2a984a7..520052a 100644
--- a/clang/test/SemaCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
@@ -1097,3 +1097,20 @@ struct C4 {
// expected-warning {{volatile-qualified parameter type 'const volatile C4' is deprecated}}
};
}
+
+
+namespace GH112559 {
+struct Wrap {};
+struct S {
+ constexpr operator Wrap (this const S& self) {
+ return Wrap{};
+ };
+ constexpr int operator <<(this Wrap self, int i) {
+ return 0;
+ }
+};
+// Purposefully invalid expression to check an assertion in the
+// expression recovery machinery.
+static_assert((S{} << 11) == a);
+// expected-error@-1 {{use of undeclared identifier 'a'}}
+}
diff --git a/clang/test/SemaCXX/typeid-ref.cpp b/clang/test/SemaCXX/typeid-ref.cpp
index f788b04..025816c 100644
--- a/clang/test/SemaCXX/typeid-ref.cpp
+++ b/clang/test/SemaCXX/typeid-ref.cpp
@@ -6,7 +6,7 @@ namespace std {
struct X { };
void f() {
- // CHECK: @_ZTS1X = linkonce_odr {{(dso_local |hidden )?}}constant
// CHECK: @_ZTI1X = linkonce_odr {{(dso_local |hidden )?}}constant
+ // CHECK: @_ZTS1X = linkonce_odr {{(dso_local |hidden )?}}constant
(void)typeid(X&);
}
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index aacecd3..bf7313f 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -9986,6 +9986,34 @@ TEST_P(ImportTemplateParmDeclDefaultValue, InvisibleInheritedFrom) {
ToFDef->getTemplateParameters()->getParam(0));
}
+TEST_P(ImportTemplateParmDeclDefaultValue, DefValImportError) {
+ const char *ToCode =
+ R"(
+ class X {
+ int A;
+ };
+ )";
+ getToTuDecl(ToCode, Lang_CXX14);
+
+ const char *FromCode =
+ R"(
+ class X;
+
+ template <typename P = X>
+ void f() {}
+
+ class X {
+ char A;
+ };
+ )";
+ TranslationUnitDecl *FromTU = getTuDecl(FromCode, Lang_CXX14);
+ auto *FromF = FirstDeclMatcher<FunctionTemplateDecl>().match(
+ FromTU, functionTemplateDecl(hasName("f")));
+
+ auto *ToFImported = Import(FromF, Lang_CXX14);
+ EXPECT_FALSE(ToFImported);
+}
+
TEST_P(ImportTemplateParmDeclDefaultValue, ImportFunctionTemplate) {
TranslationUnitDecl *FromTU = getTuDecl(CodeFunction, Lang_CXX14);
auto *D3 = LastDeclMatcher<FunctionTemplateDecl>().match(
diff --git a/clang/unittests/Basic/DiagnosticTest.cpp b/clang/unittests/Basic/DiagnosticTest.cpp
index 691d74f..d8d23e3 100644
--- a/clang/unittests/Basic/DiagnosticTest.cpp
+++ b/clang/unittests/Basic/DiagnosticTest.cpp
@@ -16,6 +16,11 @@
using namespace llvm;
using namespace clang;
+// Declare DiagnosticsTestHelper to avoid GCC warning
+namespace clang {
+void DiagnosticsTestHelper(DiagnosticsEngine &diag);
+}
+
void clang::DiagnosticsTestHelper(DiagnosticsEngine &diag) {
EXPECT_FALSE(diag.DiagStates.empty());
EXPECT_TRUE(diag.DiagStatesByLoc.empty());
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 318f08c..9e85290 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -184,6 +184,7 @@ TEST(ConfigParseTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(ObjCSpaceBeforeProtocolList);
CHECK_PARSE_BOOL(Cpp11BracedListStyle);
CHECK_PARSE_BOOL(RemoveBracesLLVM);
+ CHECK_PARSE_BOOL(RemoveEmptyLinesInUnwrappedLines);
CHECK_PARSE_BOOL(RemoveSemicolon);
CHECK_PARSE_BOOL(SkipMacroDefinitionBody);
CHECK_PARSE_BOOL(SpacesInSquareBrackets);
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 43513f1..8f4c921 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -28135,6 +28135,83 @@ TEST_F(FormatTest, BreakBinaryOperations) {
Style);
}
+TEST_F(FormatTest, RemovesEmptyLinesInUnwrappedLines) {
+ auto Style = getLLVMStyle();
+ Style.RemoveEmptyLinesInUnwrappedLines = true;
+
+ verifyFormat("int c = a + b;",
+ "int c\n"
+ "\n"
+ " = a + b;",
+ Style);
+
+ verifyFormat("enum : unsigned { AA = 0, BB } myEnum;",
+ "enum : unsigned\n"
+ "\n"
+ "{\n"
+ " AA = 0,\n"
+ " BB\n"
+ "} myEnum;",
+ Style);
+
+ verifyFormat("class B : public E {\n"
+ "private:\n"
+ "};",
+ "class B : public E\n"
+ "\n"
+ "{\n"
+ "private:\n"
+ "};",
+ Style);
+
+ verifyFormat(
+ "struct AAAAAAAAAAAAAAA test[3] = {{56, 23, \"hello\"}, {7, 5, \"!!\"}};",
+ "struct AAAAAAAAAAAAAAA test[3] = {{56,\n"
+ "\n"
+ " 23, \"hello\"},\n"
+ " {7, 5, \"!!\"}};",
+ Style);
+
+ verifyFormat("int myFunction(int aaaaaaaaaaaaa, int ccccccccccccc, int d);",
+ "int myFunction(\n"
+ "\n"
+ " int aaaaaaaaaaaaa,\n"
+ "\n"
+ " int ccccccccccccc, int d);",
+ Style);
+
+ verifyFormat("switch (e) {\n"
+ "case 1:\n"
+ " return e;\n"
+ "case 2:\n"
+ " return 2;\n"
+ "}",
+ "switch (\n"
+ "\n"
+ " e) {\n"
+ "case 1:\n"
+ " return e;\n"
+ "case 2:\n"
+ " return 2;\n"
+ "}",
+ Style);
+
+ verifyFormat("while (true) {\n"
+ "}",
+ "while (\n"
+ "\n"
+ " true) {\n"
+ "}",
+ Style);
+
+ verifyFormat("void loooonFunctionIsVeryLongButNotAsLongAsJavaTypeNames(\n"
+ " std::map<int, std::string> *outputMap);",
+ "void loooonFunctionIsVeryLongButNotAsLongAsJavaTypeNames\n"
+ "\n"
+ " (std::map<int, std::string> *outputMap);",
+ Style);
+}
+
} // namespace
} // namespace test
} // namespace format
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 00776da..60deae0 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -3554,6 +3554,12 @@ TEST_F(TokenAnnotatorTest, TemplateInstantiation) {
ASSERT_EQ(Tokens.size(), 21u) << Tokens;
EXPECT_TOKEN(Tokens[4], tok::less, TT_TemplateOpener);
EXPECT_TOKEN(Tokens[16], tok::greater, TT_TemplateCloser);
+
+ Tokens =
+ annotate("auto x{std::conditional_t<T::value == U::value, T, U>{}};");
+ ASSERT_EQ(Tokens.size(), 24u) << Tokens;
+ EXPECT_TOKEN(Tokens[6], tok::less, TT_TemplateOpener);
+ EXPECT_TOKEN(Tokens[18], tok::greater, TT_TemplateCloser);
}
} // namespace
diff --git a/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index b67c5d1..ba8840c 100644
--- a/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -367,13 +367,13 @@ void emitOption(const DocumentedOption &Option, const Record *DocInfo,
for (const Record *VisibilityHelp :
R->getValueAsListOfDefs("HelpTextsForVariants")) {
// This is a list of visibilities.
- ArrayRef<Init *> Visibilities =
+ ArrayRef<const Init *> Visibilities =
VisibilityHelp->getValueAsListInit("Visibilities")->getValues();
// See if any of the program's visibilities are in the list.
for (StringRef DocInfoMask :
DocInfo->getValueAsListOfStrings("VisibilityMask")) {
- for (Init *Visibility : Visibilities) {
+ for (const Init *Visibility : Visibilities) {
if (Visibility->getAsUnquotedString() == DocInfoMask) {
// Use the first one we find.
Description = escapeRST(VisibilityHelp->getValueAsString("Text"));
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 50f161f..aecca0f 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -169,7 +169,7 @@ static VectorTypeModifier getTupleVTM(unsigned NF) {
static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) {
// We need a special rule for segment load/store since the data width is not
- // encoded in the instrinsic name itself.
+ // encoded in the intrinsic name itself.
const StringRef IRName = RVVI->getIRName();
constexpr unsigned RVV_VTA = 0x1;
constexpr unsigned RVV_VMA = 0x2;
@@ -192,7 +192,7 @@ static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) {
static unsigned getSegInstLog2SEW(StringRef InstName) {
// clang-format off
// We need a special rule for indexed segment load/store since the data width
- // is not encoded in the instrinsic name itself.
+ // is not encoded in the intrinsic name itself.
if (InstName.starts_with("vloxseg") || InstName.starts_with("vluxseg") ||
InstName.starts_with("vsoxseg") || InstName.starts_with("vsuxseg"))
return (unsigned)-1;
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 1d79cc7..c9bf5d3 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -1373,6 +1373,10 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "typedef __clang_svbfloat16x3_t svbfloat16x3_t;\n";
OS << "typedef __clang_svbfloat16x4_t svbfloat16x4_t;\n";
+ OS << "typedef __clang_svmfloat8x2_t svmfloat8x2_t;\n";
+ OS << "typedef __clang_svmfloat8x3_t svmfloat8x3_t;\n";
+ OS << "typedef __clang_svmfloat8x4_t svmfloat8x4_t;\n";
+
OS << "typedef __SVCount_t svcount_t;\n\n";
OS << "enum svpattern\n";
diff --git a/compiler-rt/lib/interception/interception_win.cpp b/compiler-rt/lib/interception/interception_win.cpp
index 4a6ff66..4f60d42 100644
--- a/compiler-rt/lib/interception/interception_win.cpp
+++ b/compiler-rt/lib/interception/interception_win.cpp
@@ -768,6 +768,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
switch (*(u32*)(address)) {
+ case 0x1ab60f44: // 44 0f b6 1a : movzx r11d, BYTE PTR [rdx]
case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
diff --git a/compiler-rt/test/lsan/TestCases/print_threads.c b/compiler-rt/test/lsan/TestCases/print_threads.c
index b3072da..a938941 100644
--- a/compiler-rt/test/lsan/TestCases/print_threads.c
+++ b/compiler-rt/test/lsan/TestCases/print_threads.c
@@ -2,6 +2,9 @@
// XFAIL: hwasan
+// No pthread barriers on Darwin.
+// UNSUPPORTED: darwin
+
#include <assert.h>
#include <pthread.h>
#include <sanitizer/lsan_interface.h>
diff --git a/flang/docs/OptionComparison.md b/flang/docs/OptionComparison.md
index 9d6916e..fb65498 100644
--- a/flang/docs/OptionComparison.md
+++ b/flang/docs/OptionComparison.md
@@ -53,7 +53,7 @@ eN
</td>
<td>fdec,
<p>
-fall-instrinsics
+fall-intrinsics
</td>
<td><a href="https://www-01.ibm.com/support/docview.wss?uid=swg27024803&aid=1#page=297">qxlf77</a>,
<p>
diff --git a/flang/include/flang/Common/LangOptions.def b/flang/include/flang/Common/LangOptions.def
index d3e1e97..1bfdba9 100644
--- a/flang/include/flang/Common/LangOptions.def
+++ b/flang/include/flang/Common/LangOptions.def
@@ -20,6 +20,8 @@ LANGOPT(Name, Bits, Default)
#endif
ENUM_LANGOPT(FPContractMode, FPModeKind, 2, FPM_Fast) ///< FP Contract Mode (off/fast)
+/// signed integer overflow handling
+ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 1, SOB_Undefined)
/// Indicate a build without the standard GPU libraries.
LANGOPT(NoGPULib , 1, false)
diff --git a/flang/include/flang/Common/LangOptions.h b/flang/include/flang/Common/LangOptions.h
index 52a4504..83f25cfb 100644
--- a/flang/include/flang/Common/LangOptions.h
+++ b/flang/include/flang/Common/LangOptions.h
@@ -27,6 +27,14 @@ namespace Fortran::common {
class LangOptionsBase {
public:
+ enum SignedOverflowBehaviorTy {
+ // -fno-wrapv (default behavior in Flang)
+ SOB_Undefined,
+
+ // -fwrapv
+ SOB_Defined,
+ };
+
enum FPModeKind {
// Do not fuse FP ops
FPM_Off,
diff --git a/flang/include/flang/Lower/LoweringOptions.def b/flang/include/flang/Lower/LoweringOptions.def
index d3f17c3..231de53 100644
--- a/flang/include/flang/Lower/LoweringOptions.def
+++ b/flang/include/flang/Lower/LoweringOptions.def
@@ -35,9 +35,8 @@ ENUM_LOWERINGOPT(NoPPCNativeVecElemOrder, unsigned, 1, 0)
ENUM_LOWERINGOPT(Underscoring, unsigned, 1, 1)
/// If true, assume the behavior of integer overflow is defined
-/// (i.e. wraps around as two's complement). On by default.
-/// TODO: make the default off
-ENUM_LOWERINGOPT(IntegerWrapAround, unsigned, 1, 1)
+/// (i.e. wraps around as two's complement). Off by default.
+ENUM_LOWERINGOPT(IntegerWrapAround, unsigned, 1, 0)
/// If true, add nsw flags to loop variable increments.
/// Off by default.
diff --git a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
index a7c4c07..5ae32f70 100644
--- a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
+++ b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
@@ -182,7 +182,7 @@ struct VecTypeInfo {
static inline VecTypeInfo getVecTypeFromFirType(mlir::Type firTy) {
assert(mlir::isa<fir::VectorType>(firTy));
VecTypeInfo vecTyInfo;
- vecTyInfo.eleTy = mlir::dyn_cast<fir::VectorType>(firTy).getEleTy();
+ vecTyInfo.eleTy = mlir::dyn_cast<fir::VectorType>(firTy).getElementType();
vecTyInfo.len = mlir::dyn_cast<fir::VectorType>(firTy).getLen();
return vecTyInfo;
}
diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
index 7ac8e08..bfd00c3 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -465,6 +465,8 @@ def fir_SequenceType : FIR_Type<"Sequence", "array"> {
size = size * static_cast<std::uint64_t>(extent);
return size;
}
+
+ mlir::Type getElementType() const { return getEleTy(); }
}];
}
@@ -519,6 +521,8 @@ def fir_VectorType : FIR_Type<"Vector", "vector"> {
let extraClassDeclaration = [{
static bool isValidElementType(mlir::Type t);
+
+ mlir::Type getElementType() const { return getEleTy(); }
}];
let skipDefaultBuilders = 1;
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index 21b4a34..4a3c992 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -26,6 +26,7 @@
#include "flang/Common/idioms.h"
#include "flang/Common/indirection.h"
#include "llvm/Frontend/OpenACC/ACC.h.inc"
+#include "llvm/Frontend/OpenMP/OMP.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <cinttypes>
#include <list>
@@ -3660,6 +3661,7 @@ struct OmpLastprivateClause {
// OpenMP Clauses
struct OmpClause {
UNION_CLASS_BOILERPLATE(OmpClause);
+ llvm::omp::Clause Id() const;
#define GEN_FLANG_CLAUSE_PARSER_CLASSES
#include "llvm/Frontend/OpenMP/OMP.inc"
diff --git a/flang/include/flang/Runtime/magic-numbers.h b/flang/include/flang/Runtime/magic-numbers.h
index bab0e9a..1d3c5dc 100644
--- a/flang/include/flang/Runtime/magic-numbers.h
+++ b/flang/include/flang/Runtime/magic-numbers.h
@@ -107,7 +107,7 @@ The denorm value is a nonstandard extension.
#if 0
ieee_round_type values
-The values are those of the llvm.get.rounding instrinsic, which is assumed by
+The values are those of the llvm.get.rounding intrinsic, which is assumed by
ieee_arithmetic module rounding procedures.
#endif
#define _FORTRAN_RUNTIME_IEEE_TO_ZERO 0
diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp
index 4271faa..aa44967 100644
--- a/flang/lib/Evaluate/intrinsics.cpp
+++ b/flang/lib/Evaluate/intrinsics.cpp
@@ -1690,7 +1690,7 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
// MAX and MIN (and others that map to them) allow their last argument to
// be repeated indefinitely. The actualForDummy vector is sized
// and null-initialized to the non-repeated dummy argument count
- // for other instrinsics.
+ // for other intrinsics.
bool isMaxMin{dummyArgPatterns > 0 &&
dummy[dummyArgPatterns - 1].optionality == Optionality::repeats};
std::vector<ActualArgument *> actualForDummy(
diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp
index 4607a33..94d3d11 100644
--- a/flang/lib/Frontend/CompilerInvocation.cpp
+++ b/flang/lib/Frontend/CompilerInvocation.cpp
@@ -1115,6 +1115,24 @@ static bool parseOpenMPArgs(CompilerInvocation &res, llvm::opt::ArgList &args,
return diags.getNumErrors() == numErrorsBefore;
}
+/// Parses signed integer overflow options and populates the
+/// CompilerInvocation accordingly.
+/// Returns false if new errors are generated.
+///
+/// \param [out] invoc Stores the processed arguments
+/// \param [in] args The compiler invocation arguments to parse
+/// \param [out] diags DiagnosticsEngine to report erros with
+static bool parseIntegerOverflowArgs(CompilerInvocation &invoc,
+ llvm::opt::ArgList &args,
+ clang::DiagnosticsEngine &diags) {
+ Fortran::common::LangOptions &opts = invoc.getLangOpts();
+
+ if (args.getLastArg(clang::driver::options::OPT_fwrapv))
+ opts.setSignedOverflowBehavior(Fortran::common::LangOptions::SOB_Defined);
+
+ return true;
+}
+
/// Parses all floating point related arguments and populates the
/// CompilerInvocation accordingly.
/// Returns false if new errors are generated.
@@ -1255,6 +1273,18 @@ static bool parseLinkerOptionsArgs(CompilerInvocation &invoc,
return true;
}
+static bool parseLangOptionsArgs(CompilerInvocation &invoc,
+ llvm::opt::ArgList &args,
+ clang::DiagnosticsEngine &diags) {
+ bool success = true;
+
+ success &= parseIntegerOverflowArgs(invoc, args, diags);
+ success &= parseFloatingPointArgs(invoc, args, diags);
+ success &= parseVScaleArgs(invoc, args, diags);
+
+ return success;
+}
+
bool CompilerInvocation::createFromArgs(
CompilerInvocation &invoc, llvm::ArrayRef<const char *> commandLineArgs,
clang::DiagnosticsEngine &diags, const char *argv0) {
@@ -1363,9 +1393,7 @@ bool CompilerInvocation::createFromArgs(
invoc.frontendOpts.mlirArgs =
args.getAllArgValues(clang::driver::options::OPT_mmlir);
- success &= parseFloatingPointArgs(invoc, args, diags);
-
- success &= parseVScaleArgs(invoc, args, diags);
+ success &= parseLangOptionsArgs(invoc, args, diags);
success &= parseLinkerOptionsArgs(invoc, args, diags);
@@ -1577,6 +1605,8 @@ void CompilerInvocation::setLoweringOptions() {
loweringOpts.setUnderscoring(codegenOpts.Underscoring);
const Fortran::common::LangOptions &langOptions = getLangOpts();
+ loweringOpts.setIntegerWrapAround(langOptions.getSignedOverflowBehavior() ==
+ Fortran::common::LangOptions::SOB_Defined);
Fortran::common::MathOptionsBase &mathOpts = loweringOpts.getMathOptions();
// TODO: when LangOptions are finalized, we can represent
// the math related options using Fortran::commmon::MathOptionsBase,
diff --git a/flang/lib/Lower/ConvertConstant.cpp b/flang/lib/Lower/ConvertConstant.cpp
index 748be50..556b330 100644
--- a/flang/lib/Lower/ConvertConstant.cpp
+++ b/flang/lib/Lower/ConvertConstant.cpp
@@ -584,7 +584,8 @@ genInlinedArrayLit(Fortran::lower::AbstractConverter &converter,
} while (con.IncrementSubscripts(subscripts));
} else if constexpr (T::category == Fortran::common::TypeCategory::Derived) {
do {
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrayTy).getEleTy();
+ mlir::Type eleTy =
+ mlir::cast<fir::SequenceType>(arrayTy).getElementType();
mlir::Value elementVal =
genScalarLit(converter, loc, con.At(subscripts), eleTy,
/*outlineInReadOnlyMemory=*/false);
@@ -594,7 +595,7 @@ genInlinedArrayLit(Fortran::lower::AbstractConverter &converter,
} else {
llvm::SmallVector<mlir::Attribute> rangeStartIdx;
uint64_t rangeSize = 0;
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrayTy).getEleTy();
+ mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrayTy).getElementType();
do {
auto getElementVal = [&]() {
return builder.createConvert(loc, eleTy,
@@ -643,7 +644,7 @@ genOutlineArrayLit(Fortran::lower::AbstractConverter &converter,
mlir::Location loc, mlir::Type arrayTy,
const Fortran::evaluate::Constant<T> &constant) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrayTy).getEleTy();
+ mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrayTy).getElementType();
llvm::StringRef globalName = converter.getUniqueLitName(
loc, std::make_unique<Fortran::lower::SomeExpr>(toEvExpr(constant)),
eleTy);
diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index 87e2114..46168b8 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -1574,7 +1574,7 @@ public:
mlir::Location loc = getLoc();
mlir::Value addr = fir::getBase(array);
mlir::Type arrTy = fir::dyn_cast_ptrEleTy(addr.getType());
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
mlir::Type seqTy = builder.getRefType(builder.getVarLenSeqTy(eleTy));
mlir::Type refTy = builder.getRefType(eleTy);
mlir::Value base = builder.createConvert(loc, seqTy, addr);
@@ -1659,7 +1659,7 @@ public:
mlir::Location loc = getLoc();
mlir::Value addr = fir::getBase(exv);
mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(addr.getType());
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
mlir::Type refTy = builder.getRefType(eleTy);
mlir::IndexType idxTy = builder.getIndexType();
llvm::SmallVector<mlir::Value> arrayCoorArgs;
@@ -4145,7 +4145,7 @@ private:
mlir::Location loc = getLoc();
return [=, builder = &converter.getFirOpBuilder()](IterSpace iters) {
mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(tmp.getType());
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
mlir::Type eleRefTy = builder->getRefType(eleTy);
mlir::IntegerType i1Ty = builder->getI1Type();
// Adjust indices for any shift of the origin of the array.
@@ -5759,7 +5759,7 @@ private:
return fir::BoxValue(embox, lbounds, nonDeferredLenParams);
};
}
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
if (isReferentiallyOpaque()) {
// Semantics are an opaque reference to an array.
// This case forwards a continuation that will generate the address
diff --git a/flang/lib/Lower/ConvertExprToHLFIR.cpp b/flang/lib/Lower/ConvertExprToHLFIR.cpp
index 93b78fd..e93fbc5 100644
--- a/flang/lib/Lower/ConvertExprToHLFIR.cpp
+++ b/flang/lib/Lower/ConvertExprToHLFIR.cpp
@@ -579,7 +579,8 @@ private:
return createVectorSubscriptElementAddrOp(partInfo, baseType,
resultExtents);
- mlir::Type resultType = mlir::cast<fir::SequenceType>(baseType).getEleTy();
+ mlir::Type resultType =
+ mlir::cast<fir::SequenceType>(baseType).getElementType();
if (!resultTypeShape.empty()) {
// Ranked array section. The result shape comes from the array section
// subscripts.
@@ -811,7 +812,7 @@ private:
}
}
builder.setInsertionPoint(elementalAddrOp);
- return mlir::cast<fir::SequenceType>(baseType).getEleTy();
+ return mlir::cast<fir::SequenceType>(baseType).getElementType();
}
/// Yield the designator for the final part-ref inside the
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index 8b03d60..cc51d5a 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -518,7 +518,7 @@ static fir::GlobalOp defineGlobal(Fortran::lower::AbstractConverter &converter,
// type does not support nested structures.
if (mlir::isa<fir::SequenceType>(symTy) &&
!Fortran::semantics::IsAllocatableOrPointer(sym)) {
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(symTy).getEleTy();
+ mlir::Type eleTy = mlir::cast<fir::SequenceType>(symTy).getElementType();
if (mlir::isa<mlir::IntegerType, mlir::FloatType, mlir::ComplexType,
fir::LogicalType>(eleTy)) {
const auto *details =
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index 812551d..64d6612 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -22,26 +22,6 @@
#include <utility>
#include <variant>
-namespace detail {
-template <typename C>
-llvm::omp::Clause getClauseIdForClass(C &&) {
- using namespace Fortran;
- using A = llvm::remove_cvref_t<C>; // A is referenced in OMP.inc
- // The code included below contains a sequence of checks like the following
- // for each OpenMP clause
- // if constexpr (std::is_same_v<A, parser::OmpClause::AcqRel>)
- // return llvm::omp::Clause::OMPC_acq_rel;
- // [...]
-#define GEN_FLANG_CLAUSE_PARSER_KIND_MAP
-#include "llvm/Frontend/OpenMP/OMP.inc"
-}
-} // namespace detail
-
-static llvm::omp::Clause getClauseId(const Fortran::parser::OmpClause &clause) {
- return Fortran::common::visit(
- [](auto &&s) { return detail::getClauseIdForClass(s); }, clause.u);
-}
-
namespace Fortran::lower::omp {
using SymbolWithDesignator = std::tuple<semantics::Symbol *, MaybeExpr>;
@@ -1253,8 +1233,7 @@ Clause makeClause(const parser::OmpClause &cls,
semantics::SemanticsContext &semaCtx) {
return Fortran::common::visit(
[&](auto &&s) {
- return makeClause(getClauseId(cls), clause::make(s, semaCtx),
- cls.source);
+ return makeClause(cls.Id(), clause::make(s, semaCtx), cls.source);
},
cls.u);
}
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index e614327..462193a 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -3824,7 +3824,7 @@ IntrinsicLibrary::genReduction(FN func, FD funcDim, llvm::StringRef errMsg,
if (absentDim || rank == 1) {
mlir::Type ty = array.getType();
mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
if (fir::isa_complex(eleTy)) {
mlir::Value result = builder.createTemporary(loc, eleTy);
func(builder, loc, array, mask, result);
@@ -6137,7 +6137,7 @@ IntrinsicLibrary::genReduce(mlir::Type resultType,
mlir::Type ty = array.getType();
mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ mlir::Type eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
// Handle optional arguments
bool absentDim = isStaticallyAbsent(args[2]);
diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
index 7f09e88..b3b07d1 100644
--- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
@@ -2797,7 +2797,7 @@ void PPCIntrinsicLibrary::genMmaIntr(llvm::ArrayRef<fir::ExtendedValue> args) {
if (vType != targetType) {
if (mlir::isa<mlir::VectorType>(targetType)) {
// Perform vector type conversion for arguments passed by value.
- auto eleTy{mlir::dyn_cast<fir::VectorType>(vType).getEleTy()};
+ auto eleTy{mlir::dyn_cast<fir::VectorType>(vType).getElementType()};
auto len{mlir::dyn_cast<fir::VectorType>(vType).getLen()};
mlir::VectorType mlirType = mlir::VectorType::get(len, eleTy);
auto v0{builder.createConvert(loc, mlirType, v)};
diff --git a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
index c13064a..d0092ad 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
@@ -284,7 +284,7 @@ struct ForcedSpacing16 {
}
};
-/// Generate call to Exponent instrinsic runtime routine.
+/// Generate call to Exponent intrinsic runtime routine.
mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type resultType,
mlir::Value x) {
@@ -320,7 +320,7 @@ mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
return builder.create<fir::CallOp>(loc, func, args).getResult(0);
}
-/// Generate call to Fraction instrinsic runtime routine.
+/// Generate call to Fraction intrinsic runtime routine.
mlir::Value fir::runtime::genFraction(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x) {
mlir::func::FuncOp func;
@@ -596,7 +596,7 @@ mlir::Value fir::runtime::genSelectedRealKind(fir::FirOpBuilder &builder,
return builder.create<fir::CallOp>(loc, func, args).getResult(0);
}
-/// Generate call to Set_exponent instrinsic runtime routine.
+/// Generate call to Set_exponent intrinsic runtime routine.
mlir::Value fir::runtime::genSetExponent(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x,
mlir::Value i) {
diff --git a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
index b3982442..b768733 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
@@ -1157,7 +1157,7 @@ void fir::runtime::genMaxloc(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value back) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
fir::factory::CharacterExprHelper charHelper{builder, loc};
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
mlir::func::FuncOp func;
@@ -1189,7 +1189,7 @@ mlir::Value fir::runtime::genMaxval(fir::FirOpBuilder &builder,
mlir::Value maskBox) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0);
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
mlir::func::FuncOp func;
@@ -1241,7 +1241,7 @@ void fir::runtime::genMinloc(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value back) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
mlir::func::FuncOp func;
REAL_INTRINSIC_INSTANCES(Minloc, )
@@ -1298,7 +1298,7 @@ mlir::Value fir::runtime::genMinval(fir::FirOpBuilder &builder,
mlir::Value maskBox) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0);
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
@@ -1326,7 +1326,7 @@ void fir::runtime::genNorm2Dim(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
if (eleTy.isF128())
func = fir::runtime::getRuntimeFunc<ForcedNorm2DimReal16>(loc, builder);
else
@@ -1348,7 +1348,7 @@ mlir::Value fir::runtime::genNorm2(fir::FirOpBuilder &builder,
mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0);
if (eleTy.isF32())
@@ -1398,7 +1398,7 @@ mlir::Value fir::runtime::genProduct(fir::FirOpBuilder &builder,
mlir::Value resultBox) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0);
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
@@ -1482,7 +1482,7 @@ mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0);
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
@@ -1513,7 +1513,7 @@ mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
// The IAll, IAny and IParity intrinsics have essentially the same
// implementation. This macro will generate the function body given the
-// instrinsic name.
+// intrinsic name.
#define GEN_IALL_IANY_IPARITY(F) \
mlir::Value fir::runtime::JOIN2(gen, F)( \
fir::FirOpBuilder & builder, mlir::Location loc, mlir::Value arrayBox, \
@@ -1521,7 +1521,7 @@ mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::func::FuncOp func; \
auto ty = arrayBox.getType(); \
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty); \
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy(); \
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType(); \
auto dim = builder.createIntegerConstant(loc, builder.getIndexType(), 0); \
\
if (eleTy.isInteger(builder.getKindMap().getIntegerBitsize(1))) \
@@ -1596,7 +1596,7 @@ void fir::runtime::genReduce(fir::FirOpBuilder &builder, mlir::Location loc,
bool argByRef) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getI32Type(), 1);
assert(resultBox && "expect non null value for the result");
@@ -1646,7 +1646,7 @@ mlir::Value fir::runtime::genReduce(fir::FirOpBuilder &builder,
bool argByRef) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto dim = builder.createIntegerConstant(loc, builder.getI32Type(), 1);
assert((fir::isa_real(eleTy) || fir::isa_integer(eleTy) ||
@@ -1687,7 +1687,7 @@ void fir::runtime::genReduceDim(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, bool argByRef) {
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
- auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(arrTy).getElementType();
auto [cat, kind] = fir::mlirTypeToCategoryKind(loc, eleTy);
mlir::func::FuncOp func;
diff --git a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
index 8f08b01..50f14ab 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp
@@ -365,11 +365,11 @@ void fir::runtime::genMatmul(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::func::FuncOp func;
auto boxATy = matrixABox.getType();
auto arrATy = fir::dyn_cast_ptrOrBoxEleTy(boxATy);
- auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getEleTy();
+ auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getElementType();
auto [aCat, aKind] = fir::mlirTypeToCategoryKind(loc, arrAEleTy);
auto boxBTy = matrixBBox.getType();
auto arrBTy = fir::dyn_cast_ptrOrBoxEleTy(boxBTy);
- auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getEleTy();
+ auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getElementType();
auto [bCat, bKind] = fir::mlirTypeToCategoryKind(loc, arrBEleTy);
#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
@@ -417,11 +417,11 @@ void fir::runtime::genMatmulTranspose(fir::FirOpBuilder &builder,
mlir::func::FuncOp func;
auto boxATy = matrixABox.getType();
auto arrATy = fir::dyn_cast_ptrOrBoxEleTy(boxATy);
- auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getEleTy();
+ auto arrAEleTy = mlir::cast<fir::SequenceType>(arrATy).getElementType();
auto [aCat, aKind] = fir::mlirTypeToCategoryKind(loc, arrAEleTy);
auto boxBTy = matrixBBox.getType();
auto arrBTy = fir::dyn_cast_ptrOrBoxEleTy(boxBTy);
- auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getEleTy();
+ auto arrBEleTy = mlir::cast<fir::SequenceType>(arrBTy).getElementType();
auto [bCat, bKind] = fir::mlirTypeToCategoryKind(loc, arrBEleTy);
#define MATMUL_INSTANCE(ACAT, AKIND, BCAT, BKIND) \
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 68b8c66..e6eeb0d 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -2619,7 +2619,7 @@ private:
dims = dimsLeft - 1;
continue;
}
- cpnTy = mlir::cast<fir::SequenceType>(cpnTy).getEleTy();
+ cpnTy = mlir::cast<fir::SequenceType>(cpnTy).getElementType();
// append array range in reverse (FIR arrays are column-major)
offs.append(arrIdx.rbegin(), arrIdx.rend());
arrIdx.clear();
@@ -2633,7 +2633,7 @@ private:
arrIdx.push_back(nxtOpnd);
continue;
}
- cpnTy = mlir::cast<fir::SequenceType>(cpnTy).getEleTy();
+ cpnTy = mlir::cast<fir::SequenceType>(cpnTy).getElementType();
offs.push_back(nxtOpnd);
continue;
}
diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp
index 90ce8b8..cdcf9bd 100644
--- a/flang/lib/Optimizer/Dialect/FIROps.cpp
+++ b/flang/lib/Optimizer/Dialect/FIROps.cpp
@@ -1359,7 +1359,7 @@ bool fir::ConvertOp::isPointerCompatible(mlir::Type ty) {
static std::optional<mlir::Type> getVectorElementType(mlir::Type ty) {
mlir::Type elemTy;
if (mlir::isa<fir::VectorType>(ty))
- elemTy = mlir::dyn_cast<fir::VectorType>(ty).getEleTy();
+ elemTy = mlir::dyn_cast<fir::VectorType>(ty).getElementType();
else if (mlir::isa<mlir::VectorType>(ty))
elemTy = mlir::dyn_cast<mlir::VectorType>(ty).getElementType();
else
@@ -1533,7 +1533,7 @@ llvm::LogicalResult fir::CoordinateOp::verify() {
}
if (dimension) {
if (--dimension == 0)
- eleTy = mlir::cast<fir::SequenceType>(eleTy).getEleTy();
+ eleTy = mlir::cast<fir::SequenceType>(eleTy).getElementType();
} else {
if (auto t = mlir::dyn_cast<mlir::TupleType>(eleTy)) {
// FIXME: Generally, we don't know which field of the tuple is being
@@ -3817,7 +3817,7 @@ void fir::StoreOp::build(mlir::OpBuilder &builder, mlir::OperationState &result,
//===----------------------------------------------------------------------===//
inline fir::CharacterType::KindTy stringLitOpGetKind(fir::StringLitOp op) {
- auto eleTy = mlir::cast<fir::SequenceType>(op.getType()).getEleTy();
+ auto eleTy = mlir::cast<fir::SequenceType>(op.getType()).getElementType();
return mlir::cast<fir::CharacterType>(eleTy).getFKind();
}
diff --git a/flang/lib/Parser/parse-tree.cpp b/flang/lib/Parser/parse-tree.cpp
index 7f0899a..948ad04 100644
--- a/flang/lib/Parser/parse-tree.cpp
+++ b/flang/lib/Parser/parse-tree.cpp
@@ -253,3 +253,21 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const Name &x) {
return os << x.ToString();
}
} // namespace Fortran::parser
+
+template <typename C> static llvm::omp::Clause getClauseIdForClass(C &&) {
+ using namespace Fortran;
+ using A = llvm::remove_cvref_t<C>; // A is referenced in OMP.inc
+ // The code included below contains a sequence of checks like the following
+ // for each OpenMP clause
+ // if constexpr (std::is_same_v<A, parser::OmpClause::AcqRel>)
+ // return llvm::omp::Clause::OMPC_acq_rel;
+ // [...]
+#define GEN_FLANG_CLAUSE_PARSER_KIND_MAP
+#include "llvm/Frontend/OpenMP/OMP.inc"
+}
+
+namespace Fortran::parser {
+llvm::omp::Clause OmpClause::Id() const {
+ return std::visit([](auto &&s) { return getClauseIdForClass(s); }, u);
+}
+} // namespace Fortran::parser
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index bdb8a72..461a99f 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -68,11 +68,23 @@ public:
if (const auto *e{GetExpr(context_, expr)}) {
for (const Symbol &symbol : evaluate::CollectSymbols(*e)) {
const Symbol &root{GetAssociationRoot(symbol)};
- if (IsFunction(root) && !IsElementalProcedure(root)) {
- context_.Say(expr.source,
- "User defined non-ELEMENTAL function "
- "'%s' is not allowed in a WORKSHARE construct"_err_en_US,
- root.name());
+ if (IsFunction(root)) {
+ std::string attrs{""};
+ if (!IsElementalProcedure(root)) {
+ attrs = " non-ELEMENTAL";
+ }
+ if (root.attrs().test(Attr::IMPURE)) {
+ if (attrs != "") {
+ attrs = "," + attrs;
+ }
+ attrs = " IMPURE" + attrs;
+ }
+ if (attrs != "") {
+ context_.Say(expr.source,
+ "User defined%s function '%s' is not allowed in a "
+ "WORKSHARE construct"_err_en_US,
+ attrs, root.name());
+ }
}
}
}
@@ -2273,6 +2285,21 @@ void OmpStructureChecker::Leave(const parser::OmpClauseList &) {
}
}
}
+
+ // 2.11.5 Simd construct restriction (OpenMP 5.1)
+ if (auto *sl_clause{FindClause(llvm::omp::Clause::OMPC_safelen)}) {
+ if (auto *o_clause{FindClause(llvm::omp::Clause::OMPC_order)}) {
+ const auto &orderClause{
+ std::get<parser::OmpClause::Order>(o_clause->u)};
+ if (std::get<parser::OmpOrderClause::Type>(orderClause.v.t) ==
+ parser::OmpOrderClause::Type::Concurrent) {
+ context_.Say(sl_clause->source,
+ "The `SAFELEN` clause cannot appear in the `SIMD` directive "
+ "with `ORDER(CONCURRENT)` clause"_err_en_US);
+ }
+ }
+ }
+
// Sema checks related to presence of multiple list items within the same
// clause
CheckMultListItems();
@@ -2336,11 +2363,8 @@ void OmpStructureChecker::Leave(const parser::OmpClauseList &) {
void OmpStructureChecker::Enter(const parser::OmpClause &x) {
SetContextClause(x);
- llvm::omp::Clause clauseId = std::visit(
- [this](auto &&s) { return GetClauseKindForParserClass(s); }, x.u);
-
// The visitors for these clauses do their own checks.
- switch (clauseId) {
+ switch (x.Id()) {
case llvm::omp::Clause::OMPC_copyprivate:
case llvm::omp::Clause::OMPC_enter:
case llvm::omp::Clause::OMPC_lastprivate:
@@ -3217,7 +3241,7 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Lastprivate &x) {
DirectivesClauseTriple dirClauseTriple;
SymbolSourceMap currSymbols;
GetSymbolsInObjectList(objectList, currSymbols);
- CheckDefinableObjects(currSymbols, GetClauseKindForParserClass(x));
+ CheckDefinableObjects(currSymbols, llvm::omp::Clause::OMPC_lastprivate);
CheckCopyingPolymorphicAllocatable(
currSymbols, llvm::omp::Clause::OMPC_lastprivate);
@@ -3230,7 +3254,7 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Lastprivate &x) {
llvm::omp::Directive::OMPD_parallel, llvm::omp::privateReductionSet));
CheckPrivateSymbolsInOuterCxt(
- currSymbols, dirClauseTriple, GetClauseKindForParserClass(x));
+ currSymbols, dirClauseTriple, llvm::omp::Clause::OMPC_lastprivate);
using LastprivateModifier = parser::OmpLastprivateClause::LastprivateModifier;
const auto &maybeMod{std::get<std::optional<LastprivateModifier>>(x.v.t)};
diff --git a/flang/lib/Semantics/check-omp-structure.h b/flang/lib/Semantics/check-omp-structure.h
index cce9fa4..70a7779 100644
--- a/flang/lib/Semantics/check-omp-structure.h
+++ b/flang/lib/Semantics/check-omp-structure.h
@@ -132,13 +132,6 @@ public:
#define GEN_FLANG_CLAUSE_CHECK_ENTER
#include "llvm/Frontend/OpenMP/OMP.inc"
- // Get the OpenMP Clause Kind for the corresponding Parser class
- template <typename A>
- llvm::omp::Clause GetClauseKindForParserClass(const A &) {
-#define GEN_FLANG_CLAUSE_PARSER_KIND_MAP
-#include "llvm/Frontend/OpenMP/OMP.inc"
- }
-
private:
bool CheckAllowedClause(llvmOmpClause clause);
bool IsVariableListItem(const Symbol &sym);
diff --git a/flang/test/Driver/frontend-forwarding.f90 b/flang/test/Driver/frontend-forwarding.f90
index 0a56a1e..ff2d660 100644
--- a/flang/test/Driver/frontend-forwarding.f90
+++ b/flang/test/Driver/frontend-forwarding.f90
@@ -14,6 +14,7 @@
! RUN: -fno-signed-zeros \
! RUN: -fassociative-math \
! RUN: -freciprocal-math \
+! RUN: -fno-strict-overflow \
! RUN: -fomit-frame-pointer \
! RUN: -fpass-plugin=Bye%pluginext \
! RUN: -fversion-loops-for-stride \
@@ -63,4 +64,5 @@
! CHECK: "-Rpass=inline"
! CHECK: "-mframe-pointer=none"
! CHECK: "-mllvm" "-print-before-all"
+! CHECK: "-fwrapv"
! CHECK: "-save-temps=obj"
diff --git a/flang/test/Driver/integer-overflow.f90 b/flang/test/Driver/integer-overflow.f90
new file mode 100644
index 0000000..023f39f
--- /dev/null
+++ b/flang/test/Driver/integer-overflow.f90
@@ -0,0 +1,10 @@
+! Test for correct forwarding of integer overflow flags from the compiler driver
+! to the frontend driver
+
+! RUN: %flang -### -fno-strict-overflow %s 2>&1 | FileCheck %s --check-prefix=INDUCED
+! RUN: %flang -### -fstrict-overflow %s 2>&1 | FileCheck %s
+! RUN: %flang -### -fno-wrapv %s 2>&1 | FileCheck %s
+! RUN: %flang -### -fno-wrapv -fno-strict-overflow %s 2>&1 | FileCheck %s
+
+! CHECK-NOT: "-fno-wrapv"
+! INDUCED: "-fwrapv"
diff --git a/flang/test/Integration/OpenMP/atomic-capture-complex.f90 b/flang/test/Integration/OpenMP/atomic-capture-complex.f90
new file mode 100644
index 0000000..4ffd180
--- /dev/null
+++ b/flang/test/Integration/OpenMP/atomic-capture-complex.f90
@@ -0,0 +1,50 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %if x86-registered-target %{ %flang_fc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fopenmp %s -o - | FileCheck --check-prefixes=CHECK,X86 %s %}
+!RUN: %if aarch64-registered-target %{ %flang_fc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fopenmp %s -o - | FileCheck --check-prefixes=CHECK,AARCH64 %s %}
+
+!CHECK: %[[X_NEW_VAL:.*]] = alloca { float, float }, align 8
+!CHECK: %[[VAL_1:.*]] = alloca { float, float }, i64 1, align 8
+!CHECK: %[[ORIG_VAL:.*]] = alloca { float, float }, i64 1, align 8
+!CHECK: store { float, float } { float 2.000000e+00, float 2.000000e+00 }, ptr %[[ORIG_VAL]], align 4
+!CHECK: br label %entry
+
+!CHECK: entry:
+!CHECK: %[[ATOMIC_TEMP_LOAD:.*]] = alloca { float, float }, align 8
+!CHECK: call void @__atomic_load(i64 8, ptr %[[ORIG_VAL]], ptr %[[ATOMIC_TEMP_LOAD]], i32 0)
+!CHECK: %[[PHI_NODE_ENTRY_1:.*]] = load { float, float }, ptr %[[ATOMIC_TEMP_LOAD]], align 8
+!CHECK: br label %.atomic.cont
+
+!CHECK: .atomic.cont
+!CHECK: %[[VAL_4:.*]] = phi { float, float } [ %[[PHI_NODE_ENTRY_1]], %entry ], [ %{{.*}}, %.atomic.cont ]
+!CHECK: %[[VAL_5:.*]] = extractvalue { float, float } %[[VAL_4]], 0
+!CHECK: %[[VAL_6:.*]] = extractvalue { float, float } %[[VAL_4]], 1
+!CHECK: %[[VAL_7:.*]] = fadd contract float %[[VAL_5]], 1.000000e+00
+!CHECK: %[[VAL_8:.*]] = fadd contract float %[[VAL_6]], 1.000000e+00
+!CHECK: %[[VAL_9:.*]] = insertvalue { float, float } undef, float %[[VAL_7]], 0
+!CHECK: %[[VAL_10:.*]] = insertvalue { float, float } %[[VAL_9]], float %[[VAL_8]], 1
+!CHECK: store { float, float } %[[VAL_10]], ptr %[[X_NEW_VAL]], align 4
+!CHECK: %[[VAL_11:.*]] = call i1 @__atomic_compare_exchange(i64 8, ptr %[[ORIG_VAL]], ptr %[[ATOMIC_TEMP_LOAD]], ptr %[[X_NEW_VAL]],
+!i32 2, i32 2)
+!CHECK: %[[VAL_12:.*]] = load { float, float }, ptr %[[ATOMIC_TEMP_LOAD]], align 4
+!CHECK: br i1 %[[VAL_11]], label %.atomic.exit, label %.atomic.cont
+
+!CHECK: .atomic.exit
+!AARCH64: %[[LCSSA:.*]] = phi { float, float } [ %[[VAL_10]], %.atomic.cont ]
+!AARCH64: store { float, float } %[[LCSSA]], ptr %[[VAL_1]], align 4
+!X86: store { float, float } %[[VAL_10]], ptr %[[VAL_1]], align 4
+
+program main
+ complex*8 ia, ib
+ ia = (2, 2)
+ !$omp atomic capture
+ ia = ia + (1, 1)
+ ib = ia
+ !$omp end atomic
+end program
diff --git a/flang/test/Semantics/OpenMP/clause-validity01.f90 b/flang/test/Semantics/OpenMP/clause-validity01.f90
index 2454049..1a7a57b 100644
--- a/flang/test/Semantics/OpenMP/clause-validity01.f90
+++ b/flang/test/Semantics/OpenMP/clause-validity01.f90
@@ -390,6 +390,12 @@ use omp_lib
enddo
!$omp end parallel
+ !ERROR: The `SAFELEN` clause cannot appear in the `SIMD` directive with `ORDER(CONCURRENT)` clause
+ !$omp simd order(concurrent) safelen(1+2)
+ do i = 1, N
+ a = 3.14
+ enddo
+
! 2.11.1 parallel-do-clause -> parallel-clause |
! do-clause
diff --git a/flang/test/Semantics/OpenMP/do-collapse.f90 b/flang/test/Semantics/OpenMP/do-collapse.f90
index 4f25129..480bd45 100644
--- a/flang/test/Semantics/OpenMP/do-collapse.f90
+++ b/flang/test/Semantics/OpenMP/do-collapse.f90
@@ -30,5 +30,11 @@ program omp_doCollapse
do
end do
end do
-end program omp_doCollapse
+ !ERROR: At most one COLLAPSE clause can appear on the SIMD directive
+ !$omp simd collapse(2) collapse(1)
+ do i = 1, 4
+ j = j + i + 1
+ end do
+ !$omp end simd
+end program omp_doCollapse
diff --git a/flang/test/Semantics/OpenMP/loop-association.f90 b/flang/test/Semantics/OpenMP/loop-association.f90
index d216766..9fac508 100644
--- a/flang/test/Semantics/OpenMP/loop-association.f90
+++ b/flang/test/Semantics/OpenMP/loop-association.f90
@@ -131,4 +131,10 @@
!$omp end parallel do simd
!ERROR: The END PARALLEL DO SIMD directive must follow the DO loop associated with the loop construct
!$omp end parallel do simd
+
+ !ERROR: A DO loop must follow the SIMD directive
+ !$omp simd
+ a = i + 1
+ !ERROR: The END SIMD directive must follow the DO loop associated with the loop construct
+ !$omp end simd
end
diff --git a/flang/test/Semantics/OpenMP/workshare02.f90 b/flang/test/Semantics/OpenMP/workshare02.f90
index 11f33d6..dddaa35 100644
--- a/flang/test/Semantics/OpenMP/workshare02.f90
+++ b/flang/test/Semantics/OpenMP/workshare02.f90
@@ -9,6 +9,14 @@ module my_mod
integer function my_func()
my_func = 10
end function my_func
+
+ impure integer function impure_my_func()
+ impure_my_func = 20
+ end function impure_my_func
+
+ impure elemental integer function impure_ele_my_func()
+ impure_ele_my_func = 20
+ end function impure_ele_my_func
end module my_mod
subroutine workshare(aa, bb, cc, dd, ee, ff, n)
@@ -61,6 +69,16 @@ subroutine workshare(aa, bb, cc, dd, ee, ff, n)
j = j - my_func()
!$omp end atomic
+ !ERROR: User defined IMPURE, non-ELEMENTAL function 'impure_my_func' is not allowed in a WORKSHARE construct
+ cc = impure_my_func()
+ !ERROR: User defined IMPURE function 'impure_ele_my_func' is not allowed in a WORKSHARE construct
+ aa(1) = impure_ele_my_func()
+
!$omp end workshare
+ !$omp workshare
+ j = j + 1
+ !ERROR: At most one NOWAIT clause can appear on the END WORKSHARE directive
+ !$omp end workshare nowait nowait
+
end subroutine workshare
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 737ac87..0c658c6 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -79,6 +79,14 @@ function(_get_compile_options_from_config output_var)
list(APPEND config_options "-DLIBC_ADD_NULL_CHECKS")
endif()
+ if(NOT "${LIBC_CONF_FREXP_INF_NAN_EXPONENT}" STREQUAL "")
+ list(APPEND config_options "-DLIBC_FREXP_INF_NAN_EXPONENT=${LIBC_CONF_FREXP_INF_NAN_EXPONENT}")
+ endif()
+
+ if(LIBC_CONF_MATH_OPTIMIZATIONS)
+ list(APPEND compile_options "-DLIBC_MATH=${LIBC_CONF_MATH_OPTIMIZATIONS}")
+ endif()
+
set(${output_var} ${config_options} PARENT_SCOPE)
endfunction(_get_compile_options_from_config)
@@ -170,9 +178,6 @@ function(_get_common_compile_options output_var flags)
list(APPEND compile_options "-Wthread-safety")
list(APPEND compile_options "-Wglobal-constructors")
endif()
- if(LIBC_CONF_MATH_OPTIMIZATIONS)
- list(APPEND compile_options "-DLIBC_MATH=${LIBC_CONF_MATH_OPTIMIZATIONS}")
- endif()
elseif(MSVC)
list(APPEND compile_options "/EHs-c-")
list(APPEND compile_options "/GR-")
diff --git a/libc/config/config.json b/libc/config/config.json
index 2e4f878..9a5d5c3 100644
--- a/libc/config/config.json
+++ b/libc/config/config.json
@@ -87,6 +87,10 @@
"LIBC_CONF_MATH_OPTIMIZATIONS": {
"value": 0,
"doc": "Configures optimizations for math functions. Values accepted are LIBC_MATH_SKIP_ACCURATE_PASS, LIBC_MATH_SMALL_TABLES, LIBC_MATH_NO_ERRNO, LIBC_MATH_NO_EXCEPT, and LIBC_MATH_FAST."
+ },
+ "LIBC_CONF_FREXP_INF_NAN_EXPONENT": {
+ "value": "",
+    "doc": "The value written back to the second parameter when calling `frexp`/`frexpf`/`frexpl` with `+/-Inf`/`NaN` is unspecified. Configure an explicit exp value for Inf/NaN inputs."
}
},
"qsort": {
diff --git a/libc/config/gpu/entrypoints.txt b/libc/config/gpu/entrypoints.txt
index 4bb81f5..d89093b 100644
--- a/libc/config/gpu/entrypoints.txt
+++ b/libc/config/gpu/entrypoints.txt
@@ -587,6 +587,7 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.setpayloadf16
libc.src.math.setpayloadsigf16
libc.src.math.sinhf16
+ libc.src.math.tanhf16
libc.src.math.totalorderf16
libc.src.math.totalordermagf16
libc.src.math.truncf16
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 39f451d..7314dbc 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -681,6 +681,7 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.setpayloadsigf16
libc.src.math.sinhf16
libc.src.math.sinpif16
+ libc.src.math.tanhf16
libc.src.math.totalorderf16
libc.src.math.totalordermagf16
libc.src.math.truncf16
diff --git a/libc/docs/configure.rst b/libc/docs/configure.rst
index 867bb80..3db750b 100644
--- a/libc/docs/configure.rst
+++ b/libc/docs/configure.rst
@@ -33,6 +33,7 @@ to learn about the defaults for your platform and target.
* **"general" options**
- ``LIBC_ADD_NULL_CHECKS``: Add nullptr checks in the library's implementations to some functions for which passing nullptr is undefined behavior.
* **"math" options**
+ - ``LIBC_CONF_FREXP_INF_NAN_EXPONENT``: The value written back to the second parameter when calling frexp/frexpf/frexpl with +/-Inf/NaN is unspecified. Configure an explicit exp value for Inf/NaN inputs.
- ``LIBC_CONF_MATH_OPTIMIZATIONS``: Configures optimizations for math functions. Values accepted are LIBC_MATH_SKIP_ACCURATE_PASS, LIBC_MATH_SMALL_TABLES, LIBC_MATH_NO_ERRNO, LIBC_MATH_NO_EXCEPT, and LIBC_MATH_FAST.
* **"printf" options**
- ``LIBC_CONF_PRINTF_DISABLE_FIXED_POINT``: Disable printing fixed point values in printf and friends.
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index 902645c..010377a 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -348,7 +348,7 @@ Higher Math Functions
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| tan | |check| | |check| | | | | 7.12.4.7 | F.10.1.7 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| tanh | |check| | | | | | 7.12.5.6 | F.10.2.6 |
+| tanh | |check| | | | |check| | | 7.12.5.6 | F.10.2.6 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| tanpi | | | | | | 7.12.4.14 | F.10.1.14 |
+-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt
index a4cf4631..836e8a5 100644
--- a/libc/include/llvm-libc-types/CMakeLists.txt
+++ b/libc/include/llvm-libc-types/CMakeLists.txt
@@ -134,6 +134,14 @@ add_header(
DEPENDS
libc.include.llvm-libc-macros.float_macros
)
+add_header(
+ cfloat128
+ HDR
+ cfloat128.h
+ DEPENDS
+ libc.include.llvm-libc-macros.float_macros
+)
+add_header(cfloat16 HDR cfloat16.h)
add_header(fsblkcnt_t HDR fsblkcnt_t.h)
add_header(fsfilcnt_t HDR fsfilcnt_t.h)
add_header(
diff --git a/libc/include/llvm-libc-types/cfloat128.h b/libc/include/llvm-libc-types/cfloat128.h
new file mode 100644
index 0000000..a371671
--- /dev/null
+++ b/libc/include/llvm-libc-types/cfloat128.h
@@ -0,0 +1,41 @@
+//===-- Definition of cfloat128 type --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_CFLOAT128_H
+#define LLVM_LIBC_TYPES_CFLOAT128_H
+
+#include "../llvm-libc-macros/float-macros.h" // LDBL_MANT_DIG
+
+// Currently, the complex variant of C23 `_Float128` type is only defined as a
+// built-in type in GCC 7 or later, for C and in GCC 13 or later, for C++. For
+// clang, the complex variant of `__float128` is defined instead, and only on
+// x86-64 targets for clang 11 or later.
+//
+// TODO: Update the complex variant of C23 `_Float128` type detection again when
+// clang supports it.
+#if defined(__STDC_IEC_60559_COMPLEX__) && !defined(__clang__)
+#if !defined(__cplusplus)
+#define LIBC_TYPES_HAS_CFLOAT128
+typedef _Complex _Float128 cfloat128;
+#elif defined(__GNUC__) && __GNUC__ >= 13
+#define LIBC_TYPES_HAS_CFLOAT128
+typedef _Complex _Float128 cfloat128;
+#endif
+#elif __clang_major__ >= 11 && \
+ (defined(__FLOAT128__) || defined(__SIZEOF_FLOAT128__))
+// Use _Complex __float128 type. clang uses __SIZEOF_FLOAT128__ or __FLOAT128__
+// macro to notify the availability of __float128 type:
+// https://reviews.llvm.org/D15120
+#define LIBC_TYPES_HAS_CFLOAT128
+typedef _Complex __float128 cfloat128;
+#elif (LDBL_MANT_DIG == 113)
+#define LIBC_TYPES_HAS_CFLOAT128
+typedef _Complex long double cfloat128;
+#endif
+
+#endif // LLVM_LIBC_TYPES_CFLOAT128_H
diff --git a/libc/include/llvm-libc-types/cfloat16.h b/libc/include/llvm-libc-types/cfloat16.h
new file mode 100644
index 0000000..2d4cef7
--- /dev/null
+++ b/libc/include/llvm-libc-types/cfloat16.h
@@ -0,0 +1,21 @@
+//===-- Definition of cfloat16 type ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_CFLOAT16_H
+#define LLVM_LIBC_TYPES_CFLOAT16_H
+
+#if defined(__FLT16_MANT_DIG__) && \
+ (!defined(__GNUC__) || __GNUC__ >= 13 || \
+ (defined(__clang__) && __clang_major__ >= 14)) && \
+ !defined(__arm__) && !defined(_M_ARM) && !defined(__riscv) && \
+ !defined(_WIN32)
+#define LIBC_TYPES_HAS_CFLOAT16
+typedef _Complex _Float16 cfloat16;
+#endif
+
+#endif // LLVM_LIBC_TYPES_CFLOAT16_H
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index e4e46e7..196dab9 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -798,6 +798,7 @@ def StdC : StandardSpec<"stdc"> {
GuardedFunctionSpec<"sinhf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
FunctionSpec<"tanhf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
+ GuardedFunctionSpec<"tanhf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
FunctionSpec<"acosf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index c1981b8..774668b 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -126,6 +126,7 @@ add_header_library(
type_traits/is_array.h
type_traits/is_base_of.h
type_traits/is_class.h
+ type_traits/is_complex.h
type_traits/is_const.h
type_traits/is_constant_evaluated.h
type_traits/is_convertible.h
@@ -165,6 +166,7 @@ add_header_library(
libc.include.llvm-libc-macros.stdfix_macros
libc.src.__support.macros.attributes
libc.src.__support.macros.properties.types
+ libc.src.__support.macros.properties.complex_types
)
add_header_library(
diff --git a/libc/src/__support/CPP/type_traits.h b/libc/src/__support/CPP/type_traits.h
index cef4e5d..d50b661 100644
--- a/libc/src/__support/CPP/type_traits.h
+++ b/libc/src/__support/CPP/type_traits.h
@@ -25,7 +25,6 @@
#include "src/__support/CPP/type_traits/is_array.h"
#include "src/__support/CPP/type_traits/is_base_of.h"
#include "src/__support/CPP/type_traits/is_class.h"
-#include "src/__support/CPP/type_traits/is_complex.h"
#include "src/__support/CPP/type_traits/is_const.h"
#include "src/__support/CPP/type_traits/is_constant_evaluated.h"
#include "src/__support/CPP/type_traits/is_convertible.h"
diff --git a/libc/src/__support/CPP/type_traits/is_complex.h b/libc/src/__support/CPP/type_traits/is_complex.h
index 4f5ee9a..23f05c0 100644
--- a/libc/src/__support/CPP/type_traits/is_complex.h
+++ b/libc/src/__support/CPP/type_traits/is_complex.h
@@ -10,6 +10,10 @@
#include "src/__support/CPP/type_traits/is_same.h"
#include "src/__support/CPP/type_traits/remove_cv.h"
+#include "src/__support/macros/attributes.h"
+#include "src/__support/macros/config.h"
+// LIBC_TYPES_HAS_CFLOAT16 && LIBC_TYPES_HAS_CFLOAT128
+#include "src/__support/macros/properties/complex_types.h"
namespace LIBC_NAMESPACE_DECL {
namespace cpp {
@@ -25,7 +29,16 @@ private:
public:
LIBC_INLINE_VAR static constexpr bool value =
__is_unqualified_any_of<T, _Complex float, _Complex double,
- _Complex long double>();
+ _Complex long double
+#ifdef LIBC_TYPES_HAS_CFLOAT16
+ ,
+ cfloat16
+#endif
+#ifdef LIBC_TYPES_HAS_CFLOAT128
+ ,
+ cfloat128
+#endif
+ >();
};
template <typename T>
LIBC_INLINE_VAR constexpr bool is_complex_v = is_complex<T>::value;
diff --git a/libc/src/__support/FPUtil/ManipulationFunctions.h b/libc/src/__support/FPUtil/ManipulationFunctions.h
index 66bfe2a..9c10011 100644
--- a/libc/src/__support/FPUtil/ManipulationFunctions.h
+++ b/libc/src/__support/FPUtil/ManipulationFunctions.h
@@ -31,8 +31,16 @@ namespace fputil {
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
LIBC_INLINE T frexp(T x, int &exp) {
FPBits<T> bits(x);
- if (bits.is_inf_or_nan())
+ if (bits.is_inf_or_nan()) {
+#ifdef LIBC_FREXP_INF_NAN_EXPONENT
+ // The value written back to the second parameter when calling
+    // frexp/frexpf/frexpl with +/-Inf/NaN is unspecified in the standard.
+ // Set the exp value for Inf/NaN inputs explicitly to
+ // LIBC_FREXP_INF_NAN_EXPONENT if it is defined.
+ exp = LIBC_FREXP_INF_NAN_EXPONENT;
+#endif // LIBC_FREXP_INF_NAN_EXPONENT
return x;
+ }
if (bits.is_zero()) {
exp = 0;
return x;
diff --git a/libc/src/__support/macros/properties/CMakeLists.txt b/libc/src/__support/macros/properties/CMakeLists.txt
index c69f3a8..80ed63a 100644
--- a/libc/src/__support/macros/properties/CMakeLists.txt
+++ b/libc/src/__support/macros/properties/CMakeLists.txt
@@ -37,3 +37,13 @@ add_header_library(
libc.include.llvm-libc-macros.float16_macros
libc.include.llvm-libc-types.float128
)
+
+add_header_library(
+ complex_types
+ HDRS
+ complex_types.h
+ DEPENDS
+ .types
+ libc.include.llvm-libc-types.cfloat16
+ libc.include.llvm-libc-types.cfloat128
+)
diff --git a/libc/src/__support/macros/properties/complex_types.h b/libc/src/__support/macros/properties/complex_types.h
new file mode 100644
index 0000000..3f4a764
--- /dev/null
+++ b/libc/src/__support/macros/properties/complex_types.h
@@ -0,0 +1,25 @@
+//===-- Complex Types support -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Complex Types detection and support.
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_COMPLEX_TYPES_H
+#define LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_COMPLEX_TYPES_H
+
+#include "include/llvm-libc-types/cfloat128.h"
+#include "include/llvm-libc-types/cfloat16.h"
+#include "types.h"
+
+// -- cfloat16 support --------------------------------------------------------
+// LIBC_TYPES_HAS_CFLOAT16 and 'cfloat16' type are provided by
+// "include/llvm-libc-types/cfloat16.h"
+
+// -- cfloat128 support -------------------------------------------------------
+// LIBC_TYPES_HAS_CFLOAT128 and 'cfloat128' type are provided by
+// "include/llvm-libc-types/cfloat128.h"
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_COMPLEX_TYPES_H
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 2f76b57..8427b55 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -496,6 +496,7 @@ add_math_entrypoint_object(tanf)
add_math_entrypoint_object(tanh)
add_math_entrypoint_object(tanhf)
+add_math_entrypoint_object(tanhf16)
add_math_entrypoint_object(tgamma)
add_math_entrypoint_object(tgammaf)
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 4a3de8f..81b3e44 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -4289,6 +4289,29 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ tanhf16
+ SRCS
+ tanhf16.cpp
+ HDRS
+ ../tanhf16.h
+ DEPENDS
+ .expxf16
+ libc.hdr.fenv_macros
+ libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.cast
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.rounding_mode
+ libc.src.__support.macros.optimization
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
acoshf
SRCS
acoshf.cpp
diff --git a/libc/src/math/generic/exp10f16.cpp b/libc/src/math/generic/exp10f16.cpp
index f7a8ee3..006dd5c 100644
--- a/libc/src/math/generic/exp10f16.cpp
+++ b/libc/src/math/generic/exp10f16.cpp
@@ -124,7 +124,7 @@ LLVM_LIBC_FUNCTION(float16, exp10f16, (float16 x)) {
// 10^x = 2^((hi + mid) * log2(10)) * 10^lo
auto [exp2_hi_mid, exp10_lo] = exp10_range_reduction(x);
- return static_cast<float16>(exp2_hi_mid * exp10_lo);
+ return fputil::cast<float16>(exp2_hi_mid * exp10_lo);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/powf.cpp b/libc/src/math/generic/powf.cpp
index 8ce2465ba..83477c6 100644
--- a/libc/src/math/generic/powf.cpp
+++ b/libc/src/math/generic/powf.cpp
@@ -855,9 +855,9 @@ LLVM_LIBC_FUNCTION(float, powf, (float x, float y)) {
: 0.0;
exp2_hi_mid_dd.hi = exp2_hi_mid;
- return static_cast<float>(
- powf_double_double(idx_x, dx, y6, lo6_hi, exp2_hi_mid_dd)) +
- 0.0f;
+ double r_dd = powf_double_double(idx_x, dx, y6, lo6_hi, exp2_hi_mid_dd);
+
+ return static_cast<float>(r_dd);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/tanhf16.cpp b/libc/src/math/generic/tanhf16.cpp
new file mode 100644
index 0000000..ae9b4be
--- /dev/null
+++ b/libc/src/math/generic/tanhf16.cpp
@@ -0,0 +1,144 @@
+//===-- Half-precision tanh(x) function -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/tanhf16.h"
+#include "expxf16.h"
+#include "hdr/fenv_macros.h"
+#include "src/__support/CPP/array.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/nearest_integer.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+static constexpr fputil::ExceptValues<float16, 2> TANHF16_EXCEPTS = {{
+ // x = 0x1.f54p+0, tanhf16(x) = 0x1.ecp-1 (RZ)
+ {0x3fd5U, 0x3bb0U, 1U, 0U, 0U},
+ // x = -0x1.f54p+0, tanhf16(x) = -0x1.ecp-1 (RZ)
+ {0xbfd5U, 0xbbb0U, 0U, 1U, 0U},
+}};
+
+LLVM_LIBC_FUNCTION(float16, tanhf16, (float16 x)) {
+ using FPBits = fputil::FPBits<float16>;
+ FPBits x_bits(x);
+
+ uint16_t x_u = x_bits.uintval();
+ uint16_t x_abs = x_u & 0x7fffU;
+
+  // When -2^(-9) <= x <= -2^(-14), or |x| <= 0x1.d2p-4,
+ // or |x| >= atanh(1 - 2^(-11)), or x is NaN.
+ if (LIBC_UNLIKELY(x_abs <= 0x2f48U || x_abs >= 0x4429U)) {
+ // tanh(NaN) = NaN
+ if (x_bits.is_nan()) {
+ if (x_bits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+
+ return x;
+ }
+
+    // When -2^(-9) <= x <= -2^(-14).
+ if (x_u >= 0x8400U && x_u <= 0x9800U) {
+ switch (fputil::quick_get_round()) {
+ case FE_TONEAREST:
+ case FE_DOWNWARD:
+ return x;
+ default:
+ return FPBits(static_cast<uint16_t>(x_u - 1U)).get_val();
+ }
+ }
+
+ // When |x| <= 0x1.d2p-4.
+ if (x_abs <= 0x2f48U) {
+ float xf = x;
+ float xf_sq = xf * xf;
+ // Degree-7 Taylor expansion generated by Sollya with the following
+ // commands:
+ // > taylor(tanh(x), 7, 0);
+ // > display = hexadecimal;
+ // > // For each coefficient:
+ // > round(/* put coefficient here */, SG, RN);
+ return fputil::cast<float16>(
+ xf * fputil::polyeval(xf_sq, 0x1p+0f, -0x1.555556p-2f, 0x1.111112p-3f,
+ -0x1.ba1ba2p-5f));
+ }
+
+ // tanh(+/-inf) = +/-1
+ if (x_bits.is_inf())
+ return FPBits::one(x_bits.sign()).get_val();
+
+ // When |x| >= atanh(1 - 2^(-11)).
+ fputil::raise_except_if_required(FE_INEXACT);
+
+ int rounding_mode = fputil::quick_get_round();
+ if ((rounding_mode == FE_TONEAREST && x_abs >= 0x4482U) ||
+ (rounding_mode == FE_UPWARD && x_bits.is_pos()) ||
+ (rounding_mode == FE_DOWNWARD && x_bits.is_neg())) {
+ return FPBits::one(x_bits.sign()).get_val();
+ }
+ if (x_bits.is_pos())
+ return fputil::cast<float16>(0x1.ffcp-1);
+ return fputil::cast<float16>(-0x1.ffcp-1);
+ }
+
+ if (auto r = TANHF16_EXCEPTS.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+
+ // For atanh(-1 + 2^(-11)) < x < atanh(1 - 2^(-11)), to compute tanh(x), we
+ // perform the following range reduction: find hi, mid, lo, such that:
+ // x = (hi + mid) * log(2) * 0.5 + lo, in which
+ // hi is an integer,
+ // mid * 2^5 is an integer,
+ // -2^(-5) <= lo < 2^(-5).
+ // In particular,
+ // hi + mid = round(x * log2(e) * 2 * 2^5) * 2^(-5).
+ // Then,
+ // tanh(x) = sinh(x)/cosh(x)
+ // = (e^x - e^(-x)) / (e^x + e^(-x))
+ // = (e^(2x) - 1) / (e^(2x) + 1)
+ // = (2^(hi + mid) * e^(2*lo) - 1) / (2^(hi + mid) * e^(2*lo) + 1)
+ // = (e^(2*lo) - 2^(-hi - mid)) / (e^(2*lo) + 2^(-hi - mid))
+ // We store 2^(-mid) in the lookup table EXP2_MID_5_BITS, and compute
+ // 2^(-hi - mid) by adding -hi to the exponent field of 2^(-mid).
+ // e^lo is computed using a degree-3 minimax polynomial generated by Sollya.
+
+ float xf = x;
+ float kf = fputil::nearest_integer(xf * (LOG2F_E * 2.0f * 0x1.0p+5f));
+ int x_hi_mid = -static_cast<int>(kf);
+ unsigned x_hi = static_cast<unsigned>(x_hi_mid) >> 5;
+ unsigned x_mid = static_cast<unsigned>(x_hi_mid) & 0x1f;
+ // lo = x - (hi + mid)
+ // = round(x * log2(e) * 2 * 2^5) * log(2) * 0.5 * (-2^(-5)) + x
+ float lo = fputil::multiply_add(kf, LOGF_2 * 0.5f * -0x1.0p-5f, xf);
+
+ uint32_t exp2_hi_mid_bits =
+ EXP2_MID_5_BITS[x_mid] +
+ static_cast<uint32_t>(x_hi << fputil::FPBits<float>::FRACTION_LEN);
+ // exp2_hi_mid = 2^(-hi - mid)
+ float exp2_hi_mid = fputil::FPBits<float>(exp2_hi_mid_bits).get_val();
+ // Degree-3 minimax polynomial generated by Sollya with the following
+ // commands:
+ // > display = hexadecimal;
+ // > P = fpminimax(expm1(2*x)/x, 2, [|SG...|], [-2^-5, 2^-5]);
+ // > 1 + x * P;
+ float exp_2lo =
+ fputil::polyeval(lo, 0x1p+0f, 0x1p+1f, 0x1.001p+1f, 0x1.555ddep+0f);
+ return fputil::cast<float16>((exp_2lo - exp2_hi_mid) /
+ (exp_2lo + exp2_hi_mid));
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/tanhf16.h b/libc/src/math/tanhf16.h
new file mode 100644
index 0000000..6749870
--- /dev/null
+++ b/libc/src/math/tanhf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for tanhf16 -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_TANHF16_H
+#define LLVM_LIBC_SRC_MATH_TANHF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float16 tanhf16(float16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_TANHF16_H
diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h
index 5220b12..07e2cd5 100644
--- a/libc/test/UnitTest/FPMatcher.h
+++ b/libc/test/UnitTest/FPMatcher.h
@@ -11,6 +11,7 @@
#include "src/__support/CPP/array.h"
#include "src/__support/CPP/type_traits.h"
+#include "src/__support/CPP/type_traits/is_complex.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/fpbits_str.h"
@@ -128,6 +129,14 @@ public:
return matchComplex<double>();
else if (cpp::is_complex_type_same<T, _Complex long double>())
return matchComplex<long double>();
+#ifdef LIBC_TYPES_HAS_CFLOAT16
+    else if (cpp::is_complex_type_same<T, cfloat16>())
+ return matchComplex<float16>();
+#endif
+#ifdef LIBC_TYPES_HAS_CFLOAT128
+    else if (cpp::is_complex_type_same<T, cfloat128>())
+ return matchComplex<float128>();
+#endif
}
void explainError() override {
@@ -137,6 +146,14 @@ public:
return explainErrorComplex<double>();
else if (cpp::is_complex_type_same<T, _Complex long double>())
return explainErrorComplex<long double>();
+#ifdef LIBC_TYPES_HAS_CFLOAT16
+    else if (cpp::is_complex_type_same<T, cfloat16>())
+ return explainErrorComplex<float16>();
+#endif
+#ifdef LIBC_TYPES_HAS_CFLOAT128
+    else if (cpp::is_complex_type_same<T, cfloat128>())
+ return explainErrorComplex<float128>();
+#endif
}
};
diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt
index 381a3f4..11342e6 100644
--- a/libc/test/src/math/CMakeLists.txt
+++ b/libc/test/src/math/CMakeLists.txt
@@ -1967,6 +1967,17 @@ add_fp_unittest(
)
add_fp_unittest(
+ tanhf16_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ tanhf16_test.cpp
+ DEPENDS
+ libc.src.math.tanhf16
+)
+
+add_fp_unittest(
atanhf_test
NEED_MPFR
SUITE
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index f713430..899c9d2 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -3779,6 +3779,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ tanhf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ tanhf16_test.cpp
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.errno.errno
+ libc.src.math.tanhf16
+ libc.src.__support.FPUtil.cast
+)
+
+add_fp_unittest(
atanhf_test
SUITE
libc-math-smoke-tests
diff --git a/libc/test/src/math/smoke/FrexpTest.h b/libc/test/src/math/smoke/FrexpTest.h
index 11641fc..3fb3a2e 100644
--- a/libc/test/src/math/smoke/FrexpTest.h
+++ b/libc/test/src/math/smoke/FrexpTest.h
@@ -21,8 +21,19 @@ public:
void testSpecialNumbers(FrexpFunc func) {
int exponent;
EXPECT_FP_EQ_ALL_ROUNDING(aNaN, func(aNaN, &exponent));
+#ifdef LIBC_FREXP_INF_NAN_EXPONENT
+ EXPECT_EQ(LIBC_FREXP_INF_NAN_EXPONENT, exponent);
+#endif // LIBC_FREXP_INF_NAN_EXPONENT
+
EXPECT_FP_EQ_ALL_ROUNDING(inf, func(inf, &exponent));
+#ifdef LIBC_FREXP_INF_NAN_EXPONENT
+ EXPECT_EQ(LIBC_FREXP_INF_NAN_EXPONENT, exponent);
+#endif // LIBC_FREXP_INF_NAN_EXPONENT
+
EXPECT_FP_EQ_ALL_ROUNDING(neg_inf, func(neg_inf, &exponent));
+#ifdef LIBC_FREXP_INF_NAN_EXPONENT
+ EXPECT_EQ(LIBC_FREXP_INF_NAN_EXPONENT, exponent);
+#endif // LIBC_FREXP_INF_NAN_EXPONENT
EXPECT_FP_EQ_ALL_ROUNDING(zero, func(zero, &exponent));
EXPECT_EQ(exponent, 0);
diff --git a/libc/test/src/math/smoke/powf_test.cpp b/libc/test/src/math/smoke/powf_test.cpp
index bd4f98e..a0f66f2 100644
--- a/libc/test/src/math/smoke/powf_test.cpp
+++ b/libc/test/src/math/smoke/powf_test.cpp
@@ -190,4 +190,7 @@ TEST_F(LlvmLibcPowfTest, SpecialNumbers) {
FE_UNDERFLOW);
}
}
+
+ EXPECT_FP_EQ(-0.0f, LIBC_NAMESPACE::powf(-0.015625f, 25.0f));
+ EXPECT_FP_EQ(0.0f, LIBC_NAMESPACE::powf(-0.015625f, 26.0f));
}
diff --git a/libc/test/src/math/smoke/tanhf16_test.cpp b/libc/test/src/math/smoke/tanhf16_test.cpp
new file mode 100644
index 0000000..fa6328e
--- /dev/null
+++ b/libc/test/src/math/smoke/tanhf16_test.cpp
@@ -0,0 +1,143 @@
+//===-- Unittests for tanhf16 ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/errno/libc_errno.h"
+#include "src/math/tanhf16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+using LlvmLibcTanhf16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+TEST_F(LlvmLibcTanhf16Test, SpecialNumbers) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::tanhf16(aNaN));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::tanhf16(sNaN), FE_INVALID);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0),
+ LIBC_NAMESPACE::tanhf16(inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(-1.0),
+ LIBC_NAMESPACE::tanhf16(neg_inf));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::tanhf16(zero));
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, LIBC_NAMESPACE::tanhf16(neg_zero));
+ EXPECT_MATH_ERRNO(0);
+}
+
+TEST_F(LlvmLibcTanhf16Test, ResultNearBounds) {
+ LIBC_NAMESPACE::libc_errno = 0;
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(LIBC_NAMESPACE::fputil::cast<float16>(1.0),
+ LIBC_NAMESPACE::tanhf16(max_normal), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(LIBC_NAMESPACE::fputil::cast<float16>(-1.0),
+ LIBC_NAMESPACE::tanhf16(neg_max_normal),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ // round(atanh(1 - 2^-11), HP, RU);
+ float16 x = LIBC_NAMESPACE::fputil::cast<float16>(0x1.0a4p+2);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
+ LIBC_NAMESPACE::fputil::cast<float16>(0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
+ LIBC_NAMESPACE::fputil::cast<float16>(0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ x = LIBC_NAMESPACE::fputil::cast<float16>(0x1.208p+2);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
+ LIBC_NAMESPACE::fputil::cast<float16>(1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
+ LIBC_NAMESPACE::fputil::cast<float16>(0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ // round(atanh(-1 + 2^-11), HP, RD);
+ x = LIBC_NAMESPACE::fputil::cast<float16>(-0x1.0a4p+2);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ x = LIBC_NAMESPACE::fputil::cast<float16>(-0x1.208p+2);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::tanhf16(x),
+ FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::tanhf16(x), FE_INEXACT);
+ EXPECT_MATH_ERRNO(0);
+}
diff --git a/libc/test/src/math/tanhf16_test.cpp b/libc/test/src/math/tanhf16_test.cpp
new file mode 100644
index 0000000..7124a83
--- /dev/null
+++ b/libc/test/src/math/tanhf16_test.cpp
@@ -0,0 +1,40 @@
+//===-- Exhaustive test for tanhf16 ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/tanhf16.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+#include "utils/MPFRWrapper/MPFRUtils.h"
+
+using LlvmLibcTanhf16Test = LIBC_NAMESPACE::testing::FPTest<float16>;
+
+namespace mpfr = LIBC_NAMESPACE::testing::mpfr;
+
+// Range: [0, Inf];
+static constexpr uint16_t POS_START = 0x0000U;
+static constexpr uint16_t POS_STOP = 0x7c00U;
+
+// Range: [-Inf, 0];
+static constexpr uint16_t NEG_START = 0x8000U;
+static constexpr uint16_t NEG_STOP = 0xfc00U;
+
+TEST_F(LlvmLibcTanhf16Test, PositiveRange) {
+ for (uint16_t v = POS_START; v <= POS_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanh, x,
+ LIBC_NAMESPACE::tanhf16(x), 0.5);
+ }
+}
+
+TEST_F(LlvmLibcTanhf16Test, NegativeRange) {
+ for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) {
+ float16 x = FPBits(v).get_val();
+ EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanh, x,
+ LIBC_NAMESPACE::tanhf16(x), 0.5);
+ }
+}
diff --git a/libcxx/.clang-format b/libcxx/.clang-format
index a6154c7..f548119 100644
--- a/libcxx/.clang-format
+++ b/libcxx/.clang-format
@@ -30,6 +30,7 @@ AttributeMacros: [
'_LIBCPP_DEPRECATED_IN_CXX20',
'_LIBCPP_DEPRECATED_IN_CXX23',
'_LIBCPP_DEPRECATED',
+ '_LIBCPP_DISABLE_EXTENSION_WARNING',
'_LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION',
'_LIBCPP_EXPORTED_FROM_ABI',
'_LIBCPP_EXTERN_TEMPLATE_TYPE_VIS',
diff --git a/libcxx/docs/ReleaseNotes/20.rst b/libcxx/docs/ReleaseNotes/20.rst
index abd6764..44912d2 100644
--- a/libcxx/docs/ReleaseNotes/20.rst
+++ b/libcxx/docs/ReleaseNotes/20.rst
@@ -78,9 +78,9 @@ Deprecations and Removals
supported as an extension anymore, please migrate any code that uses e.g. ``std::vector<const T>`` to be
standards conforming.
-- Non-conforming member typedefs ``iterator`` and ``const_iterator`` of ``std::bitset`` are removed. Previously, they
- were private but could cause ambiguity in name lookup. Code that expects such ambiguity will possibly not compile in
- LLVM 20.
+- Non-conforming member typedefs ``base``, ``iterator`` and ``const_iterator`` of ``std::bitset``, and member typedef
+ ``base`` of ``std::forward_list`` and ``std::list`` are removed. Previously, they were private but could cause
+ ambiguity in name lookup. Code that expects such ambiguity will possibly not compile in LLVM 20.
- The function ``__libcpp_verbose_abort()`` is now ``noexcept``, to match ``std::terminate()``. (The combination of
``noexcept`` and ``[[noreturn]]`` has special significance for function effects analysis.)
diff --git a/libcxx/include/__memory/addressof.h b/libcxx/include/__memory/addressof.h
index ecb68e0..98b08958a 100644
--- a/libcxx/include/__memory/addressof.h
+++ b/libcxx/include/__memory/addressof.h
@@ -23,11 +23,9 @@ inline _LIBCPP_CONSTEXPR_SINCE_CXX17 _LIBCPP_NO_CFI _LIBCPP_HIDE_FROM_ABI _Tp* a
return __builtin_addressof(__x);
}
-#if _LIBCPP_HAS_OBJC_ARC && !defined(_LIBCPP_PREDEFINED_OBJC_ARC_ADDRESSOF)
+#if _LIBCPP_HAS_OBJC_ARC
// Objective-C++ Automatic Reference Counting uses qualified pointers
-// that require special addressof() signatures. When
-// _LIBCPP_PREDEFINED_OBJC_ARC_ADDRESSOF is defined, the compiler
-// itself is providing these definitions. Otherwise, we provide them.
+// that require special addressof() signatures.
template <class _Tp>
inline _LIBCPP_HIDE_FROM_ABI __strong _Tp* addressof(__strong _Tp& __x) _NOEXCEPT {
return &__x;
diff --git a/libcxx/include/bitset b/libcxx/include/bitset
index f90ceaa..645c172 100644
--- a/libcxx/include/bitset
+++ b/libcxx/include/bitset
@@ -612,15 +612,15 @@ class _LIBCPP_TEMPLATE_VIS bitset
: private __bitset<_Size == 0 ? 0 : (_Size - 1) / (sizeof(size_t) * CHAR_BIT) + 1, _Size> {
public:
static const unsigned __n_words = _Size == 0 ? 0 : (_Size - 1) / (sizeof(size_t) * CHAR_BIT) + 1;
- typedef __bitset<__n_words, _Size> base;
+ typedef __bitset<__n_words, _Size> __base;
public:
- typedef typename base::reference reference;
- typedef typename base::const_reference const_reference;
+ typedef typename __base::reference reference;
+ typedef typename __base::const_reference const_reference;
// 23.3.5.1 constructors:
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bitset() _NOEXCEPT {}
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bitset(unsigned long long __v) _NOEXCEPT : base(__v) {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bitset(unsigned long long __v) _NOEXCEPT : __base(__v) {}
template <class _CharT, __enable_if_t<_IsCharLikeType<_CharT>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit bitset(
const _CharT* __str,
@@ -681,11 +681,15 @@ public:
// element access:
#ifdef _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool operator[](size_t __p) const { return base::__make_ref(__p); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool operator[](size_t __p) const { return __base::__make_ref(__p); }
#else
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const_reference operator[](size_t __p) const { return base::__make_ref(__p); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const_reference operator[](size_t __p) const {
+ return __base::__make_ref(__p);
+ }
#endif
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 reference operator[](size_t __p) { return base::__make_ref(__p); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 reference operator[](size_t __p) {
+ return __base::__make_ref(__p);
+ }
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long to_ulong() const;
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long long to_ullong() const;
template <class _CharT, class _Traits, class _Allocator>
@@ -726,10 +730,10 @@ private:
_CharT __c = __str[__mp - 1 - __i];
(*this)[__i] = _Traits::eq(__c, __one);
}
- std::fill(base::__make_iter(__i), base::__make_iter(_Size), false);
+ std::fill(__base::__make_iter(__i), __base::__make_iter(_Size), false);
}
- _LIBCPP_HIDE_FROM_ABI size_t __hash_code() const _NOEXCEPT { return base::__hash_code(); }
+ _LIBCPP_HIDE_FROM_ABI size_t __hash_code() const _NOEXCEPT { return __base::__hash_code(); }
friend struct hash<bitset>;
};
@@ -737,43 +741,43 @@ private:
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>&
bitset<_Size>::operator&=(const bitset& __rhs) _NOEXCEPT {
- base::operator&=(__rhs);
+ __base::operator&=(__rhs);
return *this;
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>&
bitset<_Size>::operator|=(const bitset& __rhs) _NOEXCEPT {
- base::operator|=(__rhs);
+ __base::operator|=(__rhs);
return *this;
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>&
bitset<_Size>::operator^=(const bitset& __rhs) _NOEXCEPT {
- base::operator^=(__rhs);
+ __base::operator^=(__rhs);
return *this;
}
template <size_t _Size>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>::operator<<=(size_t __pos) _NOEXCEPT {
__pos = std::min(__pos, _Size);
- std::copy_backward(base::__make_iter(0), base::__make_iter(_Size - __pos), base::__make_iter(_Size));
- std::fill_n(base::__make_iter(0), __pos, false);
+ std::copy_backward(__base::__make_iter(0), __base::__make_iter(_Size - __pos), __base::__make_iter(_Size));
+ std::fill_n(__base::__make_iter(0), __pos, false);
return *this;
}
template <size_t _Size>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>::operator>>=(size_t __pos) _NOEXCEPT {
__pos = std::min(__pos, _Size);
- std::copy(base::__make_iter(__pos), base::__make_iter(_Size), base::__make_iter(0));
- std::fill_n(base::__make_iter(_Size - __pos), __pos, false);
+ std::copy(__base::__make_iter(__pos), __base::__make_iter(_Size), __base::__make_iter(0));
+ std::fill_n(__base::__make_iter(_Size - __pos), __pos, false);
return *this;
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>::set() _NOEXCEPT {
- std::fill_n(base::__make_iter(0), _Size, true);
+ std::fill_n(__base::__make_iter(0), _Size, true);
return *this;
}
@@ -788,7 +792,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>::reset() _NOEXCEPT {
- std::fill_n(base::__make_iter(0), _Size, false);
+ std::fill_n(__base::__make_iter(0), _Size, false);
return *this;
}
@@ -810,7 +814,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size> bitset<
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>::flip() _NOEXCEPT {
- base::flip();
+ __base::flip();
return *this;
}
@@ -819,19 +823,19 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bitset<_Size>& bitset<_Size>
if (__pos >= _Size)
__throw_out_of_range("bitset flip argument out of range");
- reference __r = base::__make_ref(__pos);
+ reference __r = __base::__make_ref(__pos);
__r = ~__r;
return *this;
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long bitset<_Size>::to_ulong() const {
- return base::to_ulong();
+ return __base::to_ulong();
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long long bitset<_Size>::to_ullong() const {
- return base::to_ullong();
+ return __base::to_ullong();
}
template <size_t _Size>
@@ -868,13 +872,13 @@ bitset<_Size>::to_string(char __zero, char __one) const {
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 size_t bitset<_Size>::count() const _NOEXCEPT {
- return static_cast<size_t>(std::count(base::__make_iter(0), base::__make_iter(_Size), true));
+ return static_cast<size_t>(std::count(__base::__make_iter(0), __base::__make_iter(_Size), true));
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bool
bitset<_Size>::operator==(const bitset& __rhs) const _NOEXCEPT {
- return std::equal(base::__make_iter(0), base::__make_iter(_Size), __rhs.__make_iter(0));
+ return std::equal(__base::__make_iter(0), __base::__make_iter(_Size), __rhs.__make_iter(0));
}
#if _LIBCPP_STD_VER <= 17
@@ -896,12 +900,12 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bool bitset<_Size>::test(siz
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bool bitset<_Size>::all() const _NOEXCEPT {
- return base::all();
+ return __base::all();
}
template <size_t _Size>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 bool bitset<_Size>::any() const _NOEXCEPT {
- return base::any();
+ return __base::any();
}
template <size_t _Size>
diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list
index d3262fb..04466d9 100644
--- a/libcxx/include/forward_list
+++ b/libcxx/include/forward_list
@@ -640,12 +640,12 @@ void __forward_list_base<_Tp, _Alloc>::clear() _NOEXCEPT {
template <class _Tp, class _Alloc /*= allocator<_Tp>*/>
class _LIBCPP_TEMPLATE_VIS forward_list : private __forward_list_base<_Tp, _Alloc> {
- typedef __forward_list_base<_Tp, _Alloc> base;
- typedef typename base::__node_allocator __node_allocator;
- typedef typename base::__node_type __node_type;
- typedef typename base::__node_traits __node_traits;
- typedef typename base::__node_pointer __node_pointer;
- typedef typename base::__begin_node_pointer __begin_node_pointer;
+ typedef __forward_list_base<_Tp, _Alloc> __base;
+ typedef typename __base::__node_allocator __node_allocator;
+ typedef typename __base::__node_type __node_type;
+ typedef typename __base::__node_traits __node_traits;
+ typedef typename __base::__node_pointer __node_pointer;
+ typedef typename __base::__begin_node_pointer __begin_node_pointer;
public:
typedef _Tp value_type;
@@ -666,8 +666,8 @@ public:
typedef typename allocator_traits<allocator_type>::size_type size_type;
typedef typename allocator_traits<allocator_type>::difference_type difference_type;
- typedef typename base::iterator iterator;
- typedef typename base::const_iterator const_iterator;
+ typedef typename __base::iterator iterator;
+ typedef typename __base::const_iterator const_iterator;
#if _LIBCPP_STD_VER >= 20
typedef size_type __remove_return_type;
#else
@@ -684,7 +684,7 @@ public:
_LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v);
template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
- _LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v, const allocator_type& __a) : base(__a) {
+ _LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v, const allocator_type& __a) : __base(__a) {
insert_after(cbefore_begin(), __n, __v);
}
@@ -697,7 +697,7 @@ public:
#if _LIBCPP_STD_VER >= 23
template <_ContainerCompatibleRange<_Tp> _Range>
_LIBCPP_HIDE_FROM_ABI forward_list(from_range_t, _Range&& __range, const allocator_type& __a = allocator_type())
- : base(__a) {
+ : __base(__a) {
prepend_range(std::forward<_Range>(__range));
}
#endif
@@ -708,8 +708,8 @@ public:
_LIBCPP_HIDE_FROM_ABI forward_list& operator=(const forward_list& __x);
#ifndef _LIBCPP_CXX03_LANG
- _LIBCPP_HIDE_FROM_ABI forward_list(forward_list&& __x) noexcept(is_nothrow_move_constructible<base>::value)
- : base(std::move(__x)) {}
+ _LIBCPP_HIDE_FROM_ABI forward_list(forward_list&& __x) noexcept(is_nothrow_move_constructible<__base>::value)
+ : __base(std::move(__x)) {}
_LIBCPP_HIDE_FROM_ABI forward_list(forward_list&& __x, const __type_identity_t<allocator_type>& __a);
_LIBCPP_HIDE_FROM_ABI forward_list(initializer_list<value_type> __il);
@@ -738,35 +738,37 @@ public:
_LIBCPP_HIDE_FROM_ABI void assign(size_type __n, const value_type& __v);
- _LIBCPP_HIDE_FROM_ABI allocator_type get_allocator() const _NOEXCEPT { return allocator_type(base::__alloc()); }
+ _LIBCPP_HIDE_FROM_ABI allocator_type get_allocator() const _NOEXCEPT { return allocator_type(__base::__alloc()); }
- _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return iterator(base::__before_begin()->__next_); }
+ _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return iterator(__base::__before_begin()->__next_); }
_LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT {
- return const_iterator(base::__before_begin()->__next_);
+ return const_iterator(__base::__before_begin()->__next_);
}
_LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { return iterator(nullptr); }
_LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { return const_iterator(nullptr); }
_LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const _NOEXCEPT {
- return const_iterator(base::__before_begin()->__next_);
+ return const_iterator(__base::__before_begin()->__next_);
}
_LIBCPP_HIDE_FROM_ABI const_iterator cend() const _NOEXCEPT { return const_iterator(nullptr); }
- _LIBCPP_HIDE_FROM_ABI iterator before_begin() _NOEXCEPT { return iterator(base::__before_begin()); }
- _LIBCPP_HIDE_FROM_ABI const_iterator before_begin() const _NOEXCEPT { return const_iterator(base::__before_begin()); }
+ _LIBCPP_HIDE_FROM_ABI iterator before_begin() _NOEXCEPT { return iterator(__base::__before_begin()); }
+ _LIBCPP_HIDE_FROM_ABI const_iterator before_begin() const _NOEXCEPT {
+ return const_iterator(__base::__before_begin());
+ }
_LIBCPP_HIDE_FROM_ABI const_iterator cbefore_begin() const _NOEXCEPT {
- return const_iterator(base::__before_begin());
+ return const_iterator(__base::__before_begin());
}
[[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const _NOEXCEPT {
- return base::__before_begin()->__next_ == nullptr;
+ return __base::__before_begin()->__next_ == nullptr;
}
_LIBCPP_HIDE_FROM_ABI size_type max_size() const _NOEXCEPT {
- return std::min<size_type>(__node_traits::max_size(base::__alloc()), numeric_limits<difference_type>::max());
+ return std::min<size_type>(__node_traits::max_size(__base::__alloc()), numeric_limits<difference_type>::max());
}
- _LIBCPP_HIDE_FROM_ABI reference front() { return base::__before_begin()->__next_->__get_value(); }
- _LIBCPP_HIDE_FROM_ABI const_reference front() const { return base::__before_begin()->__next_->__get_value(); }
+ _LIBCPP_HIDE_FROM_ABI reference front() { return __base::__before_begin()->__next_->__get_value(); }
+ _LIBCPP_HIDE_FROM_ABI const_reference front() const { return __base::__before_begin()->__next_->__get_value(); }
#ifndef _LIBCPP_CXX03_LANG
# if _LIBCPP_STD_VER >= 17
@@ -823,12 +825,12 @@ public:
_NOEXCEPT_(!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)
#endif
{
- base::swap(__x);
+ __base::swap(__x);
}
_LIBCPP_HIDE_FROM_ABI void resize(size_type __n);
_LIBCPP_HIDE_FROM_ABI void resize(size_type __n, const value_type& __v);
- _LIBCPP_HIDE_FROM_ABI void clear() _NOEXCEPT { base::clear(); }
+ _LIBCPP_HIDE_FROM_ABI void clear() _NOEXCEPT { __base::clear(); }
_LIBCPP_HIDE_FROM_ABI void splice_after(const_iterator __p, forward_list&& __x);
_LIBCPP_HIDE_FROM_ABI void splice_after(const_iterator __p, forward_list&& __x, const_iterator __i);
@@ -899,12 +901,12 @@ forward_list(from_range_t, _Range&&, _Alloc = _Alloc()) -> forward_list<ranges::
#endif
template <class _Tp, class _Alloc>
-inline forward_list<_Tp, _Alloc>::forward_list(const allocator_type& __a) : base(__a) {}
+inline forward_list<_Tp, _Alloc>::forward_list(const allocator_type& __a) : __base(__a) {}
template <class _Tp, class _Alloc>
forward_list<_Tp, _Alloc>::forward_list(size_type __n) {
if (__n > 0) {
- for (__begin_node_pointer __p = base::__before_begin(); __n > 0; --__n, __p = __p->__next_as_begin()) {
+ for (__begin_node_pointer __p = __base::__before_begin(); __n > 0; --__n, __p = __p->__next_as_begin()) {
__p->__next_ = this->__create_node(/* next = */ nullptr);
}
}
@@ -912,9 +914,9 @@ forward_list<_Tp, _Alloc>::forward_list(size_type __n) {
#if _LIBCPP_STD_VER >= 14
template <class _Tp, class _Alloc>
-forward_list<_Tp, _Alloc>::forward_list(size_type __n, const allocator_type& __base_alloc) : base(__base_alloc) {
+forward_list<_Tp, _Alloc>::forward_list(size_type __n, const allocator_type& __base_alloc) : __base(__base_alloc) {
if (__n > 0) {
- for (__begin_node_pointer __p = base::__before_begin(); __n > 0; --__n, __p = __p->__next_as_begin()) {
+ for (__begin_node_pointer __p = __base::__before_begin(); __n > 0; --__n, __p = __p->__next_as_begin()) {
__p->__next_ = this->__create_node(/* next = */ nullptr);
}
}
@@ -934,26 +936,27 @@ forward_list<_Tp, _Alloc>::forward_list(_InputIterator __f, _InputIterator __l)
template <class _Tp, class _Alloc>
template <class _InputIterator, __enable_if_t<__has_input_iterator_category<_InputIterator>::value, int> >
-forward_list<_Tp, _Alloc>::forward_list(_InputIterator __f, _InputIterator __l, const allocator_type& __a) : base(__a) {
+forward_list<_Tp, _Alloc>::forward_list(_InputIterator __f, _InputIterator __l, const allocator_type& __a)
+ : __base(__a) {
insert_after(cbefore_begin(), __f, __l);
}
template <class _Tp, class _Alloc>
forward_list<_Tp, _Alloc>::forward_list(const forward_list& __x)
- : base(__node_traits::select_on_container_copy_construction(__x.__alloc())) {
+ : __base(__node_traits::select_on_container_copy_construction(__x.__alloc())) {
insert_after(cbefore_begin(), __x.begin(), __x.end());
}
template <class _Tp, class _Alloc>
forward_list<_Tp, _Alloc>::forward_list(const forward_list& __x, const __type_identity_t<allocator_type>& __a)
- : base(__a) {
+ : __base(__a) {
insert_after(cbefore_begin(), __x.begin(), __x.end());
}
template <class _Tp, class _Alloc>
forward_list<_Tp, _Alloc>& forward_list<_Tp, _Alloc>::operator=(const forward_list& __x) {
if (this != std::addressof(__x)) {
- base::__copy_assign_alloc(__x);
+ __base::__copy_assign_alloc(__x);
assign(__x.begin(), __x.end());
}
return *this;
@@ -962,8 +965,8 @@ forward_list<_Tp, _Alloc>& forward_list<_Tp, _Alloc>::operator=(const forward_li
#ifndef _LIBCPP_CXX03_LANG
template <class _Tp, class _Alloc>
forward_list<_Tp, _Alloc>::forward_list(forward_list&& __x, const __type_identity_t<allocator_type>& __a)
- : base(std::move(__x), __a) {
- if (base::__alloc() != __x.__alloc()) {
+ : __base(std::move(__x), __a) {
+ if (__base::__alloc() != __x.__alloc()) {
typedef move_iterator<iterator> _Ip;
insert_after(cbefore_begin(), _Ip(__x.begin()), _Ip(__x.end()));
}
@@ -975,7 +978,7 @@ forward_list<_Tp, _Alloc>::forward_list(initializer_list<value_type> __il) {
}
template <class _Tp, class _Alloc>
-forward_list<_Tp, _Alloc>::forward_list(initializer_list<value_type> __il, const allocator_type& __a) : base(__a) {
+forward_list<_Tp, _Alloc>::forward_list(initializer_list<value_type> __il, const allocator_type& __a) : __base(__a) {
insert_after(cbefore_begin(), __il.begin(), __il.end());
}
@@ -983,14 +986,14 @@ template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::__move_assign(forward_list& __x, true_type)
_NOEXCEPT_(is_nothrow_move_assignable<allocator_type>::value) {
clear();
- base::__move_assign_alloc(__x);
- base::__before_begin()->__next_ = __x.__before_begin()->__next_;
- __x.__before_begin()->__next_ = nullptr;
+ __base::__move_assign_alloc(__x);
+ __base::__before_begin()->__next_ = __x.__before_begin()->__next_;
+ __x.__before_begin()->__next_ = nullptr;
}
template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::__move_assign(forward_list& __x, false_type) {
- if (base::__alloc() == __x.__alloc())
+ if (__base::__alloc() == __x.__alloc())
__move_assign(__x, true_type());
else {
typedef move_iterator<iterator> _Ip;
@@ -1061,29 +1064,30 @@ typename forward_list<_Tp, _Alloc>::reference
void
# endif
forward_list<_Tp, _Alloc>::emplace_front(_Args&&... __args) {
- base::__before_begin()->__next_ =
- this->__create_node(/* next = */ base::__before_begin()->__next_, std::forward<_Args>(__args)...);
+ __base::__before_begin()->__next_ =
+ this->__create_node(/* next = */ __base::__before_begin()->__next_, std::forward<_Args>(__args)...);
# if _LIBCPP_STD_VER >= 17
- return base::__before_begin()->__next_->__get_value();
+ return __base::__before_begin()->__next_->__get_value();
# endif
}
template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::push_front(value_type&& __v) {
- base::__before_begin()->__next_ = this->__create_node(/* next = */ base::__before_begin()->__next_, std::move(__v));
+ __base::__before_begin()->__next_ =
+ this->__create_node(/* next = */ __base::__before_begin()->__next_, std::move(__v));
}
#endif // _LIBCPP_CXX03_LANG
template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::push_front(const value_type& __v) {
- base::__before_begin()->__next_ = this->__create_node(/* next = */ base::__before_begin()->__next_, __v);
+ __base::__before_begin()->__next_ = this->__create_node(/* next = */ __base::__before_begin()->__next_, __v);
}
template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::pop_front() {
- __node_pointer __p = base::__before_begin()->__next_;
- base::__before_begin()->__next_ = __p->__next_;
+ __node_pointer __p = __base::__before_begin()->__next_;
+ __base::__before_begin()->__next_ = __p->__next_;
this->__delete_node(__p);
}
@@ -1380,8 +1384,9 @@ template <class _Tp, class _Alloc>
template <class _Compare>
void forward_list<_Tp, _Alloc>::merge(forward_list& __x, _Compare __comp) {
if (this != std::addressof(__x)) {
- base::__before_begin()->__next_ = __merge(base::__before_begin()->__next_, __x.__before_begin()->__next_, __comp);
- __x.__before_begin()->__next_ = nullptr;
+ __base::__before_begin()->__next_ =
+ __merge(__base::__before_begin()->__next_, __x.__before_begin()->__next_, __comp);
+ __x.__before_begin()->__next_ = nullptr;
}
}
@@ -1425,7 +1430,7 @@ forward_list<_Tp, _Alloc>::__merge(__node_pointer __f1, __node_pointer __f2, _Co
template <class _Tp, class _Alloc>
template <class _Compare>
inline void forward_list<_Tp, _Alloc>::sort(_Compare __comp) {
- base::__before_begin()->__next_ = __sort(base::__before_begin()->__next_, std::distance(begin(), end()), __comp);
+ __base::__before_begin()->__next_ = __sort(__base::__before_begin()->__next_, std::distance(begin(), end()), __comp);
}
template <class _Tp, class _Alloc>
@@ -1455,7 +1460,7 @@ forward_list<_Tp, _Alloc>::__sort(__node_pointer __f1, difference_type __sz, _Co
template <class _Tp, class _Alloc>
void forward_list<_Tp, _Alloc>::reverse() _NOEXCEPT {
- __node_pointer __p = base::__before_begin()->__next_;
+ __node_pointer __p = __base::__before_begin()->__next_;
if (__p != nullptr) {
__node_pointer __f = __p->__next_;
__p->__next_ = nullptr;
@@ -1465,7 +1470,7 @@ void forward_list<_Tp, _Alloc>::reverse() _NOEXCEPT {
__p = __f;
__f = __t;
}
- base::__before_begin()->__next_ = __p;
+ __base::__before_begin()->__next_ = __p;
}
}
diff --git a/libcxx/include/list b/libcxx/include/list
index 4a169b0..9530275 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -665,14 +665,14 @@ void __list_imp<_Tp, _Alloc>::swap(__list_imp& __c)
template <class _Tp, class _Alloc /*= allocator<_Tp>*/>
class _LIBCPP_TEMPLATE_VIS list : private __list_imp<_Tp, _Alloc> {
- typedef __list_imp<_Tp, _Alloc> base;
- typedef typename base::__node_type __node_type;
- typedef typename base::__node_allocator __node_allocator;
- typedef typename base::__node_pointer __node_pointer;
- typedef typename base::__node_alloc_traits __node_alloc_traits;
- typedef typename base::__node_base __node_base;
- typedef typename base::__node_base_pointer __node_base_pointer;
- typedef typename base::__base_pointer __base_pointer;
+ typedef __list_imp<_Tp, _Alloc> __base;
+ typedef typename __base::__node_type __node_type;
+ typedef typename __base::__node_allocator __node_allocator;
+ typedef typename __base::__node_pointer __node_pointer;
+ typedef typename __base::__node_alloc_traits __node_alloc_traits;
+ typedef typename __base::__node_base __node_base;
+ typedef typename __base::__node_base_pointer __node_base_pointer;
+ typedef typename __base::__base_pointer __base_pointer;
public:
typedef _Tp value_type;
@@ -682,12 +682,12 @@ public:
"Allocator::value_type must be same type as value_type");
typedef value_type& reference;
typedef const value_type& const_reference;
- typedef typename base::pointer pointer;
- typedef typename base::const_pointer const_pointer;
- typedef typename base::size_type size_type;
- typedef typename base::difference_type difference_type;
- typedef typename base::iterator iterator;
- typedef typename base::const_iterator const_iterator;
+ typedef typename __base::pointer pointer;
+ typedef typename __base::const_pointer const_pointer;
+ typedef typename __base::size_type size_type;
+ typedef typename __base::difference_type difference_type;
+ typedef typename __base::iterator iterator;
+ typedef typename __base::const_iterator const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
#if _LIBCPP_STD_VER >= 20
@@ -697,14 +697,14 @@ public:
#endif
_LIBCPP_HIDE_FROM_ABI list() _NOEXCEPT_(is_nothrow_default_constructible<__node_allocator>::value) {}
- _LIBCPP_HIDE_FROM_ABI explicit list(const allocator_type& __a) : base(__a) {}
+ _LIBCPP_HIDE_FROM_ABI explicit list(const allocator_type& __a) : __base(__a) {}
_LIBCPP_HIDE_FROM_ABI explicit list(size_type __n);
#if _LIBCPP_STD_VER >= 14
_LIBCPP_HIDE_FROM_ABI explicit list(size_type __n, const allocator_type& __a);
#endif
_LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x);
template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
- _LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x, const allocator_type& __a) : base(__a) {
+ _LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x, const allocator_type& __a) : __base(__a) {
for (; __n > 0; --__n)
push_back(__x);
}
@@ -717,7 +717,8 @@ public:
#if _LIBCPP_STD_VER >= 23
template <_ContainerCompatibleRange<_Tp> _Range>
- _LIBCPP_HIDE_FROM_ABI list(from_range_t, _Range&& __range, const allocator_type& __a = allocator_type()) : base(__a) {
+ _LIBCPP_HIDE_FROM_ABI list(from_range_t, _Range&& __range, const allocator_type& __a = allocator_type())
+ : __base(__a) {
prepend_range(std::forward<_Range>(__range));
}
#endif
@@ -757,18 +758,18 @@ public:
_LIBCPP_HIDE_FROM_ABI allocator_type get_allocator() const _NOEXCEPT;
- _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return base::__sz(); }
- [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const _NOEXCEPT { return base::empty(); }
+ _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __base::__sz(); }
+ [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const _NOEXCEPT { return __base::empty(); }
_LIBCPP_HIDE_FROM_ABI size_type max_size() const _NOEXCEPT {
- return std::min<size_type>(base::__node_alloc_max_size(), numeric_limits<difference_type >::max());
+ return std::min<size_type>(__base::__node_alloc_max_size(), numeric_limits<difference_type >::max());
}
- _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return base::begin(); }
- _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { return base::begin(); }
- _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { return base::end(); }
- _LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { return base::end(); }
- _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const _NOEXCEPT { return base::begin(); }
- _LIBCPP_HIDE_FROM_ABI const_iterator cend() const _NOEXCEPT { return base::end(); }
+ _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return __base::begin(); }
+ _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { return __base::begin(); }
+ _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { return __base::end(); }
+ _LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { return __base::end(); }
+ _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const _NOEXCEPT { return __base::begin(); }
+ _LIBCPP_HIDE_FROM_ABI const_iterator cend() const _NOEXCEPT { return __base::end(); }
_LIBCPP_HIDE_FROM_ABI reverse_iterator rbegin() _NOEXCEPT { return reverse_iterator(end()); }
_LIBCPP_HIDE_FROM_ABI const_reverse_iterator rbegin() const _NOEXCEPT { return const_reverse_iterator(end()); }
@@ -779,19 +780,19 @@ public:
_LIBCPP_HIDE_FROM_ABI reference front() {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::front called on empty list");
- return base::__end_.__next_->__as_node()->__get_value();
+ return __base::__end_.__next_->__as_node()->__get_value();
}
_LIBCPP_HIDE_FROM_ABI const_reference front() const {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::front called on empty list");
- return base::__end_.__next_->__as_node()->__get_value();
+ return __base::__end_.__next_->__as_node()->__get_value();
}
_LIBCPP_HIDE_FROM_ABI reference back() {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::back called on empty list");
- return base::__end_.__prev_->__as_node()->__get_value();
+ return __base::__end_.__prev_->__as_node()->__get_value();
}
_LIBCPP_HIDE_FROM_ABI const_reference back() const {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::back called on empty list");
- return base::__end_.__prev_->__as_node()->__get_value();
+ return __base::__end_.__prev_->__as_node()->__get_value();
}
#ifndef _LIBCPP_CXX03_LANG
@@ -864,9 +865,9 @@ public:
_NOEXCEPT_(!__node_alloc_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)
#endif
{
- base::swap(__c);
+ __base::swap(__c);
}
- _LIBCPP_HIDE_FROM_ABI void clear() _NOEXCEPT { base::clear(); }
+ _LIBCPP_HIDE_FROM_ABI void clear() _NOEXCEPT { __base::clear(); }
_LIBCPP_HIDE_FROM_ABI void pop_front();
_LIBCPP_HIDE_FROM_ABI void pop_back();
@@ -967,24 +968,24 @@ inline void list<_Tp, _Alloc>::__link_nodes(__base_pointer __p, __base_pointer _
// Link in nodes [__f, __l] at the front of the list
template <class _Tp, class _Alloc>
inline void list<_Tp, _Alloc>::__link_nodes_at_front(__base_pointer __f, __base_pointer __l) {
- __f->__prev_ = base::__end_as_link();
- __l->__next_ = base::__end_.__next_;
- __l->__next_->__prev_ = __l;
- base::__end_.__next_ = __f;
+ __f->__prev_ = __base::__end_as_link();
+ __l->__next_ = __base::__end_.__next_;
+ __l->__next_->__prev_ = __l;
+ __base::__end_.__next_ = __f;
}
// Link in nodes [__f, __l] at the back of the list
template <class _Tp, class _Alloc>
inline void list<_Tp, _Alloc>::__link_nodes_at_back(__base_pointer __f, __base_pointer __l) {
- __l->__next_ = base::__end_as_link();
- __f->__prev_ = base::__end_.__prev_;
- __f->__prev_->__next_ = __f;
- base::__end_.__prev_ = __l;
+ __l->__next_ = __base::__end_as_link();
+ __f->__prev_ = __base::__end_.__prev_;
+ __f->__prev_->__next_ = __f;
+ __base::__end_.__prev_ = __l;
}
template <class _Tp, class _Alloc>
inline typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::__iterator(size_type __n) {
- return __n <= base::__sz() / 2 ? std::next(begin(), __n) : std::prev(end(), base::__sz() - __n);
+ return __n <= __base::__sz() / 2 ? std::next(begin(), __n) : std::prev(end(), __base::__sz() - __n);
}
template <class _Tp, class _Alloc>
@@ -999,7 +1000,7 @@ list<_Tp, _Alloc>::list(size_type __n) {
#if _LIBCPP_STD_VER >= 14
template <class _Tp, class _Alloc>
-list<_Tp, _Alloc>::list(size_type __n, const allocator_type& __a) : base(__a) {
+list<_Tp, _Alloc>::list(size_type __n, const allocator_type& __a) : __base(__a) {
for (; __n > 0; --__n)
emplace_back();
}
@@ -1020,20 +1021,20 @@ list<_Tp, _Alloc>::list(_InpIter __f, _InpIter __l) {
template <class _Tp, class _Alloc>
template <class _InpIter, __enable_if_t<__has_input_iterator_category<_InpIter>::value, int> >
-list<_Tp, _Alloc>::list(_InpIter __f, _InpIter __l, const allocator_type& __a) : base(__a) {
+list<_Tp, _Alloc>::list(_InpIter __f, _InpIter __l, const allocator_type& __a) : __base(__a) {
for (; __f != __l; ++__f)
__emplace_back(*__f);
}
template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(const list& __c)
- : base(__node_alloc_traits::select_on_container_copy_construction(__c.__node_alloc())) {
+ : __base(__node_alloc_traits::select_on_container_copy_construction(__c.__node_alloc())) {
for (const_iterator __i = __c.begin(), __e = __c.end(); __i != __e; ++__i)
push_back(*__i);
}
template <class _Tp, class _Alloc>
-list<_Tp, _Alloc>::list(const list& __c, const __type_identity_t<allocator_type>& __a) : base(__a) {
+list<_Tp, _Alloc>::list(const list& __c, const __type_identity_t<allocator_type>& __a) : __base(__a) {
for (const_iterator __i = __c.begin(), __e = __c.end(); __i != __e; ++__i)
push_back(*__i);
}
@@ -1041,7 +1042,7 @@ list<_Tp, _Alloc>::list(const list& __c, const __type_identity_t<allocator_type>
#ifndef _LIBCPP_CXX03_LANG
template <class _Tp, class _Alloc>
-list<_Tp, _Alloc>::list(initializer_list<value_type> __il, const allocator_type& __a) : base(__a) {
+list<_Tp, _Alloc>::list(initializer_list<value_type> __il, const allocator_type& __a) : __base(__a) {
for (typename initializer_list<value_type>::const_iterator __i = __il.begin(), __e = __il.end(); __i != __e; ++__i)
push_back(*__i);
}
@@ -1054,12 +1055,12 @@ list<_Tp, _Alloc>::list(initializer_list<value_type> __il) {
template <class _Tp, class _Alloc>
inline list<_Tp, _Alloc>::list(list&& __c) noexcept(is_nothrow_move_constructible<__node_allocator>::value)
- : base(std::move(__c.__node_alloc())) {
+ : __base(std::move(__c.__node_alloc())) {
splice(end(), __c);
}
template <class _Tp, class _Alloc>
-inline list<_Tp, _Alloc>::list(list&& __c, const __type_identity_t<allocator_type>& __a) : base(__a) {
+inline list<_Tp, _Alloc>::list(list&& __c, const __type_identity_t<allocator_type>& __a) : __base(__a) {
if (__a == __c.get_allocator())
splice(end(), __c);
else {
@@ -1078,7 +1079,7 @@ inline list<_Tp, _Alloc>& list<_Tp, _Alloc>::operator=(list&& __c) noexcept(
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::__move_assign(list& __c, false_type) {
- if (base::__node_alloc() != __c.__node_alloc()) {
+ if (__base::__node_alloc() != __c.__node_alloc()) {
typedef move_iterator<iterator> _Ip;
assign(_Ip(__c.begin()), _Ip(__c.end()));
} else
@@ -1089,7 +1090,7 @@ template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::__move_assign(list& __c,
true_type) noexcept(is_nothrow_move_assignable<__node_allocator>::value) {
clear();
- base::__move_assign_alloc(__c);
+ __base::__move_assign_alloc(__c);
splice(end(), __c);
}
@@ -1098,7 +1099,7 @@ void list<_Tp, _Alloc>::__move_assign(list& __c,
template <class _Tp, class _Alloc>
inline list<_Tp, _Alloc>& list<_Tp, _Alloc>::operator=(const list& __c) {
if (this != std::addressof(__c)) {
- base::__copy_assign_alloc(__c);
+ __base::__copy_assign_alloc(__c);
assign(__c.begin(), __c.end());
}
return *this;
@@ -1137,14 +1138,14 @@ void list<_Tp, _Alloc>::assign(size_type __n, const value_type& __x) {
template <class _Tp, class _Alloc>
inline _Alloc list<_Tp, _Alloc>::get_allocator() const _NOEXCEPT {
- return allocator_type(base::__node_alloc());
+ return allocator_type(__base::__node_alloc());
}
template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::insert(const_iterator __p, const value_type& __x) {
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
__link_nodes(__p.__ptr_, __node->__as_link(), __node->__as_link());
- ++base::__sz();
+ ++__base::__sz();
return iterator(__node->__as_link());
}
@@ -1178,7 +1179,7 @@ list<_Tp, _Alloc>::insert(const_iterator __p, size_type __n, const value_type& _
}
#endif // _LIBCPP_HAS_EXCEPTIONS
__link_nodes(__p.__ptr_, __r.__ptr_, __e.__ptr_);
- base::__sz() += __ds;
+ __base::__sz() += __ds;
}
return __r;
}
@@ -1220,7 +1221,7 @@ list<_Tp, _Alloc>::__insert_with_sentinel(const_iterator __p, _Iterator __f, _Se
}
#endif // _LIBCPP_HAS_EXCEPTIONS
__link_nodes(__p.__ptr_, __r.__ptr_, __e.__ptr_);
- base::__sz() += __ds;
+ __base::__sz() += __ds;
}
return __r;
}
@@ -1230,7 +1231,7 @@ void list<_Tp, _Alloc>::push_front(const value_type& __x) {
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
__base_pointer __nl = __node->__as_link();
__link_nodes_at_front(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
}
template <class _Tp, class _Alloc>
@@ -1238,7 +1239,7 @@ void list<_Tp, _Alloc>::push_back(const value_type& __x) {
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
__base_pointer __nl = __node->__as_link();
__link_nodes_at_back(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
}
#ifndef _LIBCPP_CXX03_LANG
@@ -1248,7 +1249,7 @@ void list<_Tp, _Alloc>::push_front(value_type&& __x) {
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
__base_pointer __nl = __node->__as_link();
__link_nodes_at_front(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
}
template <class _Tp, class _Alloc>
@@ -1256,7 +1257,7 @@ void list<_Tp, _Alloc>::push_back(value_type&& __x) {
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
__base_pointer __nl = __node->__as_link();
__link_nodes_at_back(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
}
template <class _Tp, class _Alloc>
@@ -1271,7 +1272,7 @@ list<_Tp, _Alloc>::emplace_front(_Args&&... __args) {
this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
__base_pointer __nl = __node->__as_link();
__link_nodes_at_front(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
# if _LIBCPP_STD_VER >= 17
return __node->__get_value();
# endif
@@ -1289,7 +1290,7 @@ list<_Tp, _Alloc>::emplace_back(_Args&&... __args) {
this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
__base_pointer __nl = __node->__as_link();
__link_nodes_at_back(__nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
# if _LIBCPP_STD_VER >= 17
return __node->__get_value();
# endif
@@ -1302,7 +1303,7 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::emplace(const_iterator _
this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
__base_pointer __nl = __node->__as_link();
__link_nodes(__p.__ptr_, __nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
return iterator(__nl);
}
@@ -1311,7 +1312,7 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::insert(const_iterator __
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
__base_pointer __nl = __node->__as_link();
__link_nodes(__p.__ptr_, __nl, __nl);
- ++base::__sz();
+ ++__base::__sz();
return iterator(__nl);
}
@@ -1320,18 +1321,18 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::insert(const_iterator __
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::pop_front() {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::pop_front() called with empty list");
- __base_pointer __n = base::__end_.__next_;
- base::__unlink_nodes(__n, __n);
- --base::__sz();
+ __base_pointer __n = __base::__end_.__next_;
+ __base::__unlink_nodes(__n, __n);
+ --__base::__sz();
this->__delete_node(__n->__as_node());
}
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::pop_back() {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::pop_back() called on an empty list");
- __base_pointer __n = base::__end_.__prev_;
- base::__unlink_nodes(__n, __n);
- --base::__sz();
+ __base_pointer __n = __base::__end_.__prev_;
+ __base::__unlink_nodes(__n, __n);
+ --__base::__sz();
this->__delete_node(__n->__as_node());
}
@@ -1340,8 +1341,8 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __p
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__p != end(), "list::erase(iterator) called with a non-dereferenceable iterator");
__base_pointer __n = __p.__ptr_;
__base_pointer __r = __n->__next_;
- base::__unlink_nodes(__n, __n);
- --base::__sz();
+ __base::__unlink_nodes(__n, __n);
+ --__base::__sz();
this->__delete_node(__n->__as_node());
return iterator(__r);
}
@@ -1349,11 +1350,11 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __p
template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __f, const_iterator __l) {
if (__f != __l) {
- base::__unlink_nodes(__f.__ptr_, __l.__ptr_->__prev_);
+ __base::__unlink_nodes(__f.__ptr_, __l.__ptr_->__prev_);
while (__f != __l) {
__base_pointer __n = __f.__ptr_;
++__f;
- --base::__sz();
+ --__base::__sz();
this->__delete_node(__n->__as_node());
}
}
@@ -1362,10 +1363,10 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __f
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::resize(size_type __n) {
- if (__n < base::__sz())
+ if (__n < __base::__sz())
erase(__iterator(__n), end());
- else if (__n > base::__sz()) {
- __n -= base::__sz();
+ else if (__n > __base::__sz()) {
+ __n -= __base::__sz();
size_type __ds = 0;
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr);
++__ds;
@@ -1391,16 +1392,16 @@ void list<_Tp, _Alloc>::resize(size_type __n) {
}
#endif // _LIBCPP_HAS_EXCEPTIONS
__link_nodes_at_back(__r.__ptr_, __e.__ptr_);
- base::__sz() += __ds;
+ __base::__sz() += __ds;
}
}
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::resize(size_type __n, const value_type& __x) {
- if (__n < base::__sz())
+ if (__n < __base::__sz())
erase(__iterator(__n), end());
- else if (__n > base::__sz()) {
- __n -= base::__sz();
+ else if (__n > __base::__sz()) {
+ __n -= __base::__sz();
size_type __ds = 0;
__node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
++__ds;
@@ -1426,8 +1427,8 @@ void list<_Tp, _Alloc>::resize(size_type __n, const value_type& __x) {
throw;
}
#endif // _LIBCPP_HAS_EXCEPTIONS
- __link_nodes(base::__end_as_link(), __r.__ptr_, __e.__ptr_);
- base::__sz() += __ds;
+ __link_nodes(__base::__end_as_link(), __r.__ptr_, __e.__ptr_);
+ __base::__sz() += __ds;
}
}
@@ -1438,9 +1439,9 @@ void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c) {
if (!__c.empty()) {
__base_pointer __f = __c.__end_.__next_;
__base_pointer __l = __c.__end_.__prev_;
- base::__unlink_nodes(__f, __l);
+ __base::__unlink_nodes(__f, __l);
__link_nodes(__p.__ptr_, __f, __l);
- base::__sz() += __c.__sz();
+ __base::__sz() += __c.__sz();
__c.__sz() = 0;
}
}
@@ -1449,10 +1450,10 @@ template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __i) {
if (__p.__ptr_ != __i.__ptr_ && __p.__ptr_ != __i.__ptr_->__next_) {
__base_pointer __f = __i.__ptr_;
- base::__unlink_nodes(__f, __f);
+ __base::__unlink_nodes(__f, __f);
__link_nodes(__p.__ptr_, __f, __f);
--__c.__sz();
- ++base::__sz();
+ ++__base::__sz();
}
}
@@ -1465,9 +1466,9 @@ void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __f
if (this != std::addressof(__c)) {
size_type __s = std::distance(__f, __l) + 1;
__c.__sz() -= __s;
- base::__sz() += __s;
+ __base::__sz() += __s;
}
- base::__unlink_nodes(__first, __last);
+ __base::__unlink_nodes(__first, __last);
__link_nodes(__p.__ptr_, __first, __last);
}
}
@@ -1547,12 +1548,12 @@ void list<_Tp, _Alloc>::merge(list& __c, _Comp __comp) {
iterator __m2 = std::next(__f2);
for (; __m2 != __e2 && __comp(*__m2, *__f1); ++__m2, (void)++__ds)
;
- base::__sz() += __ds;
+ __base::__sz() += __ds;
__c.__sz() -= __ds;
__base_pointer __f = __f2.__ptr_;
__base_pointer __l = __m2.__ptr_->__prev_;
__f2 = __m2;
- base::__unlink_nodes(__f, __l);
+ __base::__unlink_nodes(__f, __l);
__m2 = std::next(__f1);
__link_nodes(__f1.__ptr_, __f, __l);
__f1 = __m2;
@@ -1571,7 +1572,7 @@ inline void list<_Tp, _Alloc>::sort() {
template <class _Tp, class _Alloc>
template <class _Comp>
inline void list<_Tp, _Alloc>::sort(_Comp __comp) {
- __sort(begin(), end(), base::__sz(), __comp);
+ __sort(begin(), end(), __base::__sz(), __comp);
}
template <class _Tp, class _Alloc>
@@ -1585,7 +1586,7 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
case 2:
if (__comp(*--__e2, *__f1)) {
__base_pointer __f = __e2.__ptr_;
- base::__unlink_nodes(__f, __f);
+ __base::__unlink_nodes(__f, __f);
__link_nodes(__f1.__ptr_, __f, __f);
return __e2;
}
@@ -1603,7 +1604,7 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
__base_pointer __l = __m2.__ptr_->__prev_;
__r = __f2;
__e1 = __f2 = __m2;
- base::__unlink_nodes(__f, __l);
+ __base::__unlink_nodes(__f, __l);
__m2 = std::next(__f1);
__link_nodes(__f1.__ptr_, __f, __l);
__f1 = __m2;
@@ -1619,7 +1620,7 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
if (__e1 == __f2)
__e1 = __m2;
__f2 = __m2;
- base::__unlink_nodes(__f, __l);
+ __base::__unlink_nodes(__f, __l);
__m2 = std::next(__f1);
__link_nodes(__f1.__ptr_, __f, __l);
__f1 = __m2;
@@ -1631,7 +1632,7 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::reverse() _NOEXCEPT {
- if (base::__sz() > 1) {
+ if (__base::__sz() > 1) {
iterator __e = end();
for (iterator __i = begin(); __i.__ptr_ != __e.__ptr_;) {
std::swap(__i.__ptr_->__prev_, __i.__ptr_->__next_);
diff --git a/libcxx/test/std/containers/sequences/forwardlist/types.pass.cpp b/libcxx/test/std/containers/sequences/forwardlist/types.pass.cpp
index 9867bf8..5476601 100644
--- a/libcxx/test/std/containers/sequences/forwardlist/types.pass.cpp
+++ b/libcxx/test/std/containers/sequences/forwardlist/types.pass.cpp
@@ -30,6 +30,24 @@
#include "test_macros.h"
#include "min_allocator.h"
+// Ensures that we don't use a non-uglified name 'base' in the implementation of 'forward_list'.
+
+struct my_base {
+ typedef my_base base;
+};
+
+template <class T, class A = std::allocator<T> >
+struct my_derived : my_base, std::forward_list<T, A> {};
+
+static_assert(std::is_same<my_derived<char>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<int>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<my_base>::base, my_base>::value, "");
+#if TEST_STD_VER >= 11
+static_assert(std::is_same<my_derived<char, min_allocator<char>>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<int, min_allocator<int>>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<my_base, min_allocator<my_base>>::base, my_base>::value, "");
+#endif
+
struct A { std::forward_list<A> v; }; // incomplete type support
int main(int, char**)
diff --git a/libcxx/test/std/containers/sequences/list/types.pass.cpp b/libcxx/test/std/containers/sequences/list/types.pass.cpp
index 8fe31e3..0c0a127 100644
--- a/libcxx/test/std/containers/sequences/list/types.pass.cpp
+++ b/libcxx/test/std/containers/sequences/list/types.pass.cpp
@@ -27,6 +27,24 @@
#include "test_macros.h"
#include "min_allocator.h"
+// Ensures that we don't use a non-uglified name 'base' in the implementation of 'list'.
+
+struct my_base {
+ typedef my_base base;
+};
+
+template <class T, class A = std::allocator<T> >
+struct my_derived : my_base, std::list<T, A> {};
+
+static_assert(std::is_same<my_derived<char>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<int>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<my_base>::base, my_base>::value, "");
+#if TEST_STD_VER >= 11
+static_assert(std::is_same<my_derived<char, min_allocator<char>>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<int, min_allocator<int>>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<my_base, min_allocator<my_base>>::base, my_base>::value, "");
+#endif
+
struct A { std::list<A> v; }; // incomplete type support
int main(int, char**)
diff --git a/libcxx/test/std/utilities/template.bitset/bitset.members/nonstdmem.uglified.compile.pass.cpp b/libcxx/test/std/utilities/template.bitset/bitset.members/nonstdmem.uglified.compile.pass.cpp
index c9dd923..f1daa7c 100644
--- a/libcxx/test/std/utilities/template.bitset/bitset.members/nonstdmem.uglified.compile.pass.cpp
+++ b/libcxx/test/std/utilities/template.bitset/bitset.members/nonstdmem.uglified.compile.pass.cpp
@@ -8,8 +8,8 @@
// <bitset>
-// This test ensures that we don't use a non-uglified name 'iterator' and
-// 'const_iterator' in the implementation of bitset.
+// This test ensures that we don't use non-uglified names 'iterator',
+// 'const_iterator', and 'base' in the implementation of bitset.
//
// See https://github.com/llvm/llvm-project/issues/111125.
@@ -20,6 +20,7 @@
struct my_base {
typedef int* iterator;
typedef const int* const_iterator;
+ typedef my_base base;
};
template <std::size_t N>
@@ -44,3 +45,13 @@ static_assert(std::is_same<my_derived<32>::const_iterator, const int*>::value, "
static_assert(std::is_same<my_derived<48>::const_iterator, const int*>::value, "");
static_assert(std::is_same<my_derived<64>::const_iterator, const int*>::value, "");
static_assert(std::is_same<my_derived<96>::const_iterator, const int*>::value, "");
+
+static_assert(std::is_same<my_derived<0>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<1>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<8>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<12>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<16>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<32>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<48>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<64>::base, my_base>::value, "");
+static_assert(std::is_same<my_derived<96>::base, my_base>::value, "");
diff --git a/lld/MachO/ObjC.cpp b/lld/MachO/ObjC.cpp
index b9f7592..ff13e8e 100644
--- a/lld/MachO/ObjC.cpp
+++ b/lld/MachO/ObjC.cpp
@@ -423,7 +423,7 @@ public:
private:
DenseSet<const Symbol *> collectNlCategories();
void collectAndValidateCategoriesData();
- void
+ bool
mergeCategoriesIntoSingleCategory(std::vector<InfoInputCategory> &categories);
void eraseISec(ConcatInputSection *isec);
@@ -434,8 +434,8 @@ private:
catListToErasedOffsets);
void collectSectionWriteInfoFromIsec(const InputSection *isec,
InfoWriteSection &catWriteInfo);
- void collectCategoryWriterInfoFromCategory(const InfoInputCategory &catInfo);
- void parseCatInfoToExtInfo(const InfoInputCategory &catInfo,
+ bool collectCategoryWriterInfoFromCategory(const InfoInputCategory &catInfo);
+ bool parseCatInfoToExtInfo(const InfoInputCategory &catInfo,
ClassExtensionInfo &extInfo);
void parseProtocolListInfo(const ConcatInputSection *isec, uint32_t secOffset,
@@ -446,7 +446,7 @@ private:
uint32_t secOffset,
SourceLanguage sourceLang);
- void parsePointerListInfo(const ConcatInputSection *isec, uint32_t secOffset,
+ bool parsePointerListInfo(const ConcatInputSection *isec, uint32_t secOffset,
PointerListInfo &ptrList);
void emitAndLinkPointerList(Defined *parentSym, uint32_t linkAtOffset,
@@ -474,7 +474,7 @@ private:
uint32_t offset);
Defined *getClassRo(const Defined *classSym, bool getMetaRo);
SourceLanguage getClassSymSourceLang(const Defined *classSym);
- void mergeCategoriesIntoBaseClass(const Defined *baseClass,
+ bool mergeCategoriesIntoBaseClass(const Defined *baseClass,
std::vector<InfoInputCategory> &categories);
void eraseSymbolAtIsecOffset(ConcatInputSection *isec, uint32_t offset);
void tryEraseDefinedAtIsecOffset(const ConcatInputSection *isec,
@@ -543,9 +543,9 @@ ObjcCategoryMerger::tryGetSymbolAtIsecOffset(const ConcatInputSection *isec,
if (!reloc)
return nullptr;
- Symbol *sym = reloc->referent.get<Symbol *>();
+ Symbol *sym = reloc->referent.dyn_cast<Symbol *>();
- if (reloc->addend) {
+ if (reloc->addend && sym) {
assert(isa<Defined>(sym) && "Expected defined for non-zero addend");
Defined *definedSym = cast<Defined>(sym);
sym = tryFindDefinedOnIsec(definedSym->isec(),
@@ -618,7 +618,7 @@ void ObjcCategoryMerger::tryEraseDefinedAtIsecOffset(
}
}
-void ObjcCategoryMerger::collectCategoryWriterInfoFromCategory(
+bool ObjcCategoryMerger::collectCategoryWriterInfoFromCategory(
const InfoInputCategory &catInfo) {
if (!infoCategoryWriter.catListInfo.valid)
@@ -631,7 +631,14 @@ void ObjcCategoryMerger::collectCategoryWriterInfoFromCategory(
if (!infoCategoryWriter.catNameInfo.valid) {
lld::macho::Defined *catNameSym =
tryGetDefinedAtIsecOffset(catInfo.catBodyIsec, catLayout.nameOffset);
- assert(catNameSym && "Category does not have a valid name Symbol");
+
+ if (!catNameSym) {
+ // This is an unhandled case where the category name is not a symbol but
+ // instead points to a CStringInputSection (that doesn't have any symbol)
+ // TODO: Find a small repro and either fix or add a test case for this
+ // scenario
+ return false;
+ }
collectSectionWriteInfoFromIsec(catNameSym->isec(),
infoCategoryWriter.catNameInfo);
@@ -651,6 +658,8 @@ void ObjcCategoryMerger::collectCategoryWriterInfoFromCategory(
}
}
}
+
+ return true;
}
// Parse a protocol list that might be linked to ConcatInputSection at a given
@@ -723,7 +732,7 @@ ObjcCategoryMerger::parseProtocolListInfo(const ConcatInputSection *isec,
// Parse a pointer list that might be linked to ConcatInputSection at a given
// offset. This can be used for instance methods, class methods, instance props
// and class props since they have the same format.
-void ObjcCategoryMerger::parsePointerListInfo(const ConcatInputSection *isec,
+bool ObjcCategoryMerger::parsePointerListInfo(const ConcatInputSection *isec,
uint32_t secOffset,
PointerListInfo &ptrList) {
assert(ptrList.pointersPerStruct == 2 || ptrList.pointersPerStruct == 3);
@@ -732,8 +741,9 @@ void ObjcCategoryMerger::parsePointerListInfo(const ConcatInputSection *isec,
"Trying to read pointer list beyond section end");
const Reloc *reloc = isec->getRelocAt(secOffset);
+ // Empty list is a valid case, return true.
if (!reloc)
- return;
+ return true;
auto *ptrListSym = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
assert(ptrListSym && "Reloc does not have a valid Defined");
@@ -759,17 +769,24 @@ void ObjcCategoryMerger::parsePointerListInfo(const ConcatInputSection *isec,
const Reloc *reloc = ptrListSym->isec()->getRelocAt(off);
assert(reloc && "No reloc found at pointer list offset");
- auto *listSym = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
- assert(listSym && "Reloc does not have a valid Defined");
+ auto *listSym =
+ dyn_cast_or_null<Defined>(reloc->referent.dyn_cast<Symbol *>());
+ // Sometimes, the reloc points to a StringPiece (InputSection + addend)
+ // instead of a symbol.
+ // TODO: Skip these cases for now, but we should fix this.
+ if (!listSym)
+ return false;
ptrList.allPtrs.push_back(listSym);
}
+
+ return true;
}
// Here we parse all the information of an input category (catInfo) and
// append the parsed info into the structure which will contain all the
// information about how a class is extended (extInfo)
-void ObjcCategoryMerger::parseCatInfoToExtInfo(const InfoInputCategory &catInfo,
+bool ObjcCategoryMerger::parseCatInfoToExtInfo(const InfoInputCategory &catInfo,
ClassExtensionInfo &extInfo) {
const Reloc *catNameReloc =
catInfo.catBodyIsec->getRelocAt(catLayout.nameOffset);
@@ -808,20 +825,27 @@ void ObjcCategoryMerger::parseCatInfoToExtInfo(const InfoInputCategory &catInfo,
"class");
}
- parsePointerListInfo(catInfo.catBodyIsec, catLayout.instanceMethodsOffset,
- extInfo.instanceMethods);
+ if (!parsePointerListInfo(catInfo.catBodyIsec,
+ catLayout.instanceMethodsOffset,
+ extInfo.instanceMethods))
+ return false;
- parsePointerListInfo(catInfo.catBodyIsec, catLayout.classMethodsOffset,
- extInfo.classMethods);
+ if (!parsePointerListInfo(catInfo.catBodyIsec, catLayout.classMethodsOffset,
+ extInfo.classMethods))
+ return false;
parseProtocolListInfo(catInfo.catBodyIsec, catLayout.protocolsOffset,
extInfo.protocols, catInfo.sourceLanguage);
- parsePointerListInfo(catInfo.catBodyIsec, catLayout.instancePropsOffset,
- extInfo.instanceProps);
+ if (!parsePointerListInfo(catInfo.catBodyIsec, catLayout.instancePropsOffset,
+ extInfo.instanceProps))
+ return false;
- parsePointerListInfo(catInfo.catBodyIsec, catLayout.classPropsOffset,
- extInfo.classProps);
+ if (!parsePointerListInfo(catInfo.catBodyIsec, catLayout.classPropsOffset,
+ extInfo.classProps))
+ return false;
+
+ return true;
}
// Generate a protocol list (including header) and link it into the parent at
@@ -1090,14 +1114,15 @@ Defined *ObjcCategoryMerger::emitCategory(const ClassExtensionInfo &extInfo) {
// This method merges all the categories (sharing a base class) into a single
// category.
-void ObjcCategoryMerger::mergeCategoriesIntoSingleCategory(
+bool ObjcCategoryMerger::mergeCategoriesIntoSingleCategory(
std::vector<InfoInputCategory> &categories) {
assert(categories.size() > 1 && "Expected at least 2 categories");
ClassExtensionInfo extInfo(catLayout);
for (auto &catInfo : categories)
- parseCatInfoToExtInfo(catInfo, extInfo);
+ if (!parseCatInfoToExtInfo(catInfo, extInfo))
+ return false;
Defined *newCatDef = emitCategory(extInfo);
assert(newCatDef && "Failed to create a new category");
@@ -1107,6 +1132,8 @@ void ObjcCategoryMerger::mergeCategoriesIntoSingleCategory(
for (auto &catInfo : categories)
catInfo.wasMerged = true;
+
+ return true;
}
void ObjcCategoryMerger::createSymbolReference(Defined *refFrom,
@@ -1179,9 +1206,10 @@ void ObjcCategoryMerger::collectAndValidateCategoriesData() {
tryGetSymbolAtIsecOffset(catBodyIsec, catLayout.klassOffset);
assert(classSym && "Category does not have a valid base class");
- categoryMap[classSym].push_back(catInputInfo);
+ if (!collectCategoryWriterInfoFromCategory(catInputInfo))
+ continue;
- collectCategoryWriterInfoFromCategory(catInputInfo);
+ categoryMap[classSym].push_back(catInputInfo);
}
}
}
@@ -1309,13 +1337,17 @@ void ObjcCategoryMerger::doMerge() {
collectAndValidateCategoriesData();
for (auto &[baseClass, catInfos] : categoryMap) {
+ bool merged = false;
if (auto *baseClassDef = dyn_cast<Defined>(baseClass)) {
// Merge all categories into the base class
- mergeCategoriesIntoBaseClass(baseClassDef, catInfos);
+ merged = mergeCategoriesIntoBaseClass(baseClassDef, catInfos);
} else if (catInfos.size() > 1) {
// Merge all categories into a new, single category
- mergeCategoriesIntoSingleCategory(catInfos);
+ merged = mergeCategoriesIntoSingleCategory(catInfos);
}
+ if (!merged)
+ warn("ObjC category merging skipped for class symbol' " +
+ baseClass->getName().str() + "'\n");
}
// Erase all categories that were merged
@@ -1374,7 +1406,8 @@ ObjcCategoryMerger::getClassSymSourceLang(const Defined *classSym) {
llvm_unreachable("Unexpected class symbol name during category merging");
}
-void ObjcCategoryMerger::mergeCategoriesIntoBaseClass(
+
+bool ObjcCategoryMerger::mergeCategoriesIntoBaseClass(
const Defined *baseClass, std::vector<InfoInputCategory> &categories) {
assert(categories.size() >= 1 && "Expected at least one category to merge");
@@ -1383,9 +1416,9 @@ void ObjcCategoryMerger::mergeCategoriesIntoBaseClass(
extInfo.baseClass = baseClass;
extInfo.baseClassSourceLanguage = getClassSymSourceLang(baseClass);
- for (auto &catInfo : categories) {
- parseCatInfoToExtInfo(catInfo, extInfo);
- }
+ for (auto &catInfo : categories)
+ if (!parseCatInfoToExtInfo(catInfo, extInfo))
+ return false;
// Get metadata for the base class
Defined *metaRo = getClassRo(baseClass, /*getMetaRo=*/true);
@@ -1452,6 +1485,8 @@ void ObjcCategoryMerger::mergeCategoriesIntoBaseClass(
// Mark all the categories as merged - this will be used to erase them later
for (auto &catInfo : categories)
catInfo.wasMerged = true;
+
+ return true;
}
// Erase the symbol at a given offset in an InputSection
diff --git a/lld/test/MachO/objc-category-merging-minimal.s b/lld/test/MachO/objc-category-merging-minimal.s
index 088a4d0..0fc785a 100644
--- a/lld/test/MachO/objc-category-merging-minimal.s
+++ b/lld/test/MachO/objc-category-merging-minimal.s
@@ -28,6 +28,19 @@
# RUN: %lld -no_objc_relative_method_lists -arch arm64 -dylib -o merge_base_class_swift_minimal_yes_merge.dylib -objc_category_merging MyBaseClassSwiftExtension.o merge_base_class_minimal.o
# RUN: llvm-objdump --objc-meta-data --macho merge_base_class_swift_minimal_yes_merge.dylib | FileCheck %s --check-prefixes=YES_MERGE_INTO_BASE_SWIFT
+############ Test merging skipped due to invalid category name ############
+# Modify __OBJC_$_CATEGORY_MyBaseClass_$_Category01's name to point to L_OBJC_IMAGE_INFO+3
+# RUN: sed -E '/^__OBJC_\$_CATEGORY_MyBaseClass_\$_Category01:/ { n; s/^[ \t]*\.quad[ \t]+l_OBJC_CLASS_NAME_$/\t.quad\tL_OBJC_IMAGE_INFO+3/ }' merge_cat_minimal.s > merge_cat_minimal_bad_name.s
+
+# Assemble the modified source
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o merge_cat_minimal_bad_name.o merge_cat_minimal_bad_name.s
+
+# Run lld and check for the specific warning
+# RUN: %no-fatal-warnings-lld -arch arm64 -dylib -objc_category_merging -o merge_cat_minimal_merge.dylib a64_fakedylib.dylib merge_cat_minimal_bad_name.o 2>&1 | FileCheck %s --check-prefix=MERGE_WARNING
+
+# Check that lld emitted the warning about skipping category merging
+MERGE_WARNING: warning: ObjC category merging skipped for class symbol' _OBJC_CLASS_$_MyBaseClass'
+
#### Check merge categories enabled ###
# Check that the original categories are not there
MERGE_CATS-NOT: __OBJC_$_CATEGORY_MyBaseClass_$_Category01
diff --git a/lldb/CMakeLists.txt b/lldb/CMakeLists.txt
index 59cdc45..5827e04 100644
--- a/lldb/CMakeLists.txt
+++ b/lldb/CMakeLists.txt
@@ -97,7 +97,7 @@ if (LLDB_ENABLE_PYTHON OR LLDB_ENABLE_LUA)
add_subdirectory(bindings)
endif ()
-# We need the headers generated by instrinsics_gen before we can compile
+# We need the headers generated by intrinsics_gen before we can compile
# any source file in LLDB as the imported Clang modules might include
# some of these generated headers. This approach is copied from Clang's main
# CMakeLists.txt, so it should kept in sync the code in Clang which was added
diff --git a/lldb/docs/use/aarch64-linux.md b/lldb/docs/use/aarch64-linux.md
index 803f56d..70432f5 100644
--- a/lldb/docs/use/aarch64-linux.md
+++ b/lldb/docs/use/aarch64-linux.md
@@ -160,7 +160,7 @@ Kernel does.
### Visibility of an Inactive ZA Register
LLDB does not handle registers that can come and go at runtime (SVE changes
-size but it does not dissappear). Therefore when `za` is not enabled, LLDB
+size but it does not disappear). Therefore when `za` is not enabled, LLDB
will return a block of 0s instead. This block will match the expected size of
`za`:
```
@@ -183,9 +183,9 @@ If you want to know whether `za` is active or not, refer to bit 2 of the
As for SVE, LLDB does not know how the debugee will use `za`, and therefore
does not know how it would be best to display it. At any time any given
-instrucion could interpret its contents as many kinds and sizes of data.
+instruction could interpret its contents as many kinds and sizes of data.
-So LLDB will default to showing `za` as one large vector of individual bytes.
+So LLDB will default to showing `za` as one large vector of individual bytes.
You can override this with a format option (see the SVE example above).
### Expression Evaluation
@@ -228,4 +228,4 @@ bytes.
### Expression Evaluation
`zt0`'s value and whether it is active or not will be saved prior to
-expression evaluation and restored afterwards. \ No newline at end of file
+expression evaluation and restored afterwards.
diff --git a/lldb/source/Commands/CommandObjectMultiword.cpp b/lldb/source/Commands/CommandObjectMultiword.cpp
index 484d902..b4cdfea 100644
--- a/lldb/source/Commands/CommandObjectMultiword.cpp
+++ b/lldb/source/Commands/CommandObjectMultiword.cpp
@@ -102,11 +102,9 @@ llvm::Error CommandObjectMultiword::LoadUserSubcommand(
std::string str_name(name);
- auto pos = m_subcommand_dict.find(str_name);
- if (pos == m_subcommand_dict.end()) {
- m_subcommand_dict[str_name] = cmd_obj_sp;
+ auto [pos, inserted] = m_subcommand_dict.try_emplace(str_name, cmd_obj_sp);
+ if (inserted)
return llvm::Error::success();
- }
const char *error_str = nullptr;
if (!can_replace)
@@ -117,7 +115,7 @@ llvm::Error CommandObjectMultiword::LoadUserSubcommand(
if (error_str) {
return llvm::createStringError(llvm::inconvertibleErrorCode(), error_str);
}
- m_subcommand_dict[str_name] = cmd_obj_sp;
+ pos->second = cmd_obj_sp;
return llvm::Error::success();
}
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 50115a6..e710f97 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -5066,6 +5066,9 @@ lldb::Encoding TypeSystemClang::GetEncoding(lldb::opaque_compiler_type_t type,
case clang::BuiltinType::SveUint64x3:
case clang::BuiltinType::SveUint64x4:
case clang::BuiltinType::SveMFloat8:
+ case clang::BuiltinType::SveMFloat8x2:
+ case clang::BuiltinType::SveMFloat8x3:
+ case clang::BuiltinType::SveMFloat8x4:
case clang::BuiltinType::SveFloat16:
case clang::BuiltinType::SveBFloat16:
case clang::BuiltinType::SveBFloat16x2:
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/Makefile b/lldb/test/API/lang/cpp/odr-handling-with-dylib/Makefile
new file mode 100644
index 0000000..91eadaa
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/Makefile
@@ -0,0 +1,6 @@
+CXX_SOURCES := main.cpp service.cpp
+
+DYLIB_CXX_SOURCES := plugin.cpp
+DYLIB_NAME := plugin
+
+include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/TestOdrHandlingWithDylib.py b/lldb/test/API/lang/cpp/odr-handling-with-dylib/TestOdrHandlingWithDylib.py
new file mode 100644
index 0000000..f67d933
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/TestOdrHandlingWithDylib.py
@@ -0,0 +1,29 @@
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class OdrHandlingWithDylibTestCase(TestBase):
+ @skipIf(
+ bugnumber="https://github.com/llvm/llvm-project/issues/50375, rdar://135551810"
+ )
+ def test(self):
+ """
+ Tests that the expression evaluator is able to deal with types
+ whose definitions conflict across multiple LLDB modules (in this
+ case the definition for 'class Service' in the main executable
+ has an additional field compared to the definition found in the
+ dylib). This causes the ASTImporter to detect a name conflict
+ while importing 'Service'. With LLDB's liberal ODRHandlingType
+ the ASTImporter happily creates a conflicting AST node for
+ 'Service' in the scratch ASTContext, leading to a crash down
+ the line.
+ """
+ self.build()
+
+ lldbutil.run_to_source_breakpoint(
+ self, "plugin_entry", lldb.SBFileSpec("plugin.cpp")
+ )
+
+ self.expect_expr("*gProxyThis")
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/main.cpp b/lldb/test/API/lang/cpp/odr-handling-with-dylib/main.cpp
new file mode 100644
index 0000000..f3372e0
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/main.cpp
@@ -0,0 +1,11 @@
+#include "plugin.h"
+
+#define HIDE_FROM_PLUGIN 1
+#include "service.h"
+
+int main() {
+ exported();
+ plugin_init();
+ plugin_entry();
+ return 0;
+}
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.cpp b/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.cpp
new file mode 100644
index 0000000..1903880
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.cpp
@@ -0,0 +1,14 @@
+#include "plugin.h"
+#include "service.h"
+
+struct Proxy : public Service {
+ State *proxyState;
+};
+
+Proxy *gProxyThis = 0;
+
+extern "C" {
+void plugin_init() { gProxyThis = new Proxy; }
+
+void plugin_entry() {}
+}
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.h b/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.h
new file mode 100644
index 0000000..9d4ba5d
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/plugin.h
@@ -0,0 +1,9 @@
+#ifndef PLUGIN_H_IN
+#define PLUGIN_H_IN
+
+extern "C" {
+void plugin_entry(void);
+void plugin_init(void);
+}
+
+#endif // PLUGIN_H_IN
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.cpp b/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.cpp
new file mode 100644
index 0000000..6302a45
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.cpp
@@ -0,0 +1,15 @@
+#define HIDE_FROM_PLUGIN 1
+#include "service.h"
+
+struct ServiceAux {
+ Service *Owner;
+};
+
+struct Service::State {};
+
+void exported() {
+ // Make sure debug-info for definition of Service is
+ // emitted in this CU.
+ Service service;
+ service.start(0);
+}
diff --git a/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.h b/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.h
new file mode 100644
index 0000000..37c6b9a
--- /dev/null
+++ b/lldb/test/API/lang/cpp/odr-handling-with-dylib/service.h
@@ -0,0 +1,20 @@
+#ifndef SERVICE_H_IN
+#define SERVICE_H_IN
+
+struct ServiceAux;
+
+struct Service {
+ struct State;
+ bool start(State *) { return true; }
+
+#ifdef HIDE_FROM_PLUGIN
+ int __resv1;
+#endif // HIDE_FROM_PLUGIN
+
+ Service *__owner;
+ ServiceAux *aux;
+};
+
+void exported();
+
+#endif
diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h
index eb441bb..43c9b80 100644
--- a/llvm/include/llvm/ADT/STLExtras.h
+++ b/llvm/include/llvm/ADT/STLExtras.h
@@ -1023,6 +1023,16 @@ class concat_iterator
std::forward_iterator_tag, ValueT> {
using BaseT = typename concat_iterator::iterator_facade_base;
+ static constexpr bool ReturnsByValue =
+ !(std::is_reference_v<decltype(*std::declval<IterTs>())> && ...);
+
+ using reference_type =
+ typename std::conditional_t<ReturnsByValue, ValueT, ValueT &>;
+
+ using handle_type =
+ typename std::conditional_t<ReturnsByValue, std::optional<ValueT>,
+ ValueT *>;
+
/// We store both the current and end iterators for each concatenated
/// sequence in a tuple of pairs.
///
@@ -1065,27 +1075,30 @@ class concat_iterator
/// Returns null if the specified iterator is at the end. Otherwise,
/// dereferences the iterator and returns the address of the resulting
/// reference.
- template <size_t Index> ValueT *getHelper() const {
+ template <size_t Index> handle_type getHelper() const {
auto &Begin = std::get<Index>(Begins);
auto &End = std::get<Index>(Ends);
if (Begin == End)
- return nullptr;
+ return {};
- return &*Begin;
+ if constexpr (ReturnsByValue)
+ return *Begin;
+ else
+ return &*Begin;
}
/// Finds the first non-end iterator, dereferences, and returns the resulting
/// reference.
///
/// It is an error to call this with all iterators at the end.
- template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
+ template <size_t... Ns> reference_type get(std::index_sequence<Ns...>) const {
// Build a sequence of functions to get from iterator if possible.
- ValueT *(concat_iterator::*GetHelperFns[])() const = {
- &concat_iterator::getHelper<Ns>...};
+ handle_type (concat_iterator::*GetHelperFns[])()
+ const = {&concat_iterator::getHelper<Ns>...};
// Loop over them, and return the first result we find.
for (auto &GetHelperFn : GetHelperFns)
- if (ValueT *P = (this->*GetHelperFn)())
+ if (auto P = (this->*GetHelperFn)())
return *P;
llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
@@ -1107,7 +1120,7 @@ public:
return *this;
}
- ValueT &operator*() const {
+ reference_type operator*() const {
return get(std::index_sequence_for<IterTs...>());
}
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
index 9b9affd..d472cde 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -1751,6 +1751,21 @@ TLI_DEFINE_ENUM_INTERNAL(log2l)
TLI_DEFINE_STRING_INTERNAL("log2l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)
+/// int ilogb(double x);
+TLI_DEFINE_ENUM_INTERNAL(ilogb)
+TLI_DEFINE_STRING_INTERNAL("ilogb")
+TLI_DEFINE_SIG_INTERNAL(Int, Dbl)
+
+/// int ilogbf(float x);
+TLI_DEFINE_ENUM_INTERNAL(ilogbf)
+TLI_DEFINE_STRING_INTERNAL("ilogbf")
+TLI_DEFINE_SIG_INTERNAL(Int, Flt)
+
+/// int ilogbl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(ilogbl)
+TLI_DEFINE_STRING_INTERNAL("ilogbl")
+TLI_DEFINE_SIG_INTERNAL(Int, LDbl)
+
/// double logb(double x);
TLI_DEFINE_ENUM_INTERNAL(logb)
TLI_DEFINE_STRING_INTERNAL("logb")
diff --git a/llvm/include/llvm/BinaryFormat/Minidump.h b/llvm/include/llvm/BinaryFormat/Minidump.h
index addff429..03497d4 100644
--- a/llvm/include/llvm/BinaryFormat/Minidump.h
+++ b/llvm/include/llvm/BinaryFormat/Minidump.h
@@ -247,7 +247,7 @@ static_assert(sizeof(Thread) == 48);
struct Exception {
static constexpr size_t MaxParameters = 15;
static constexpr size_t MaxParameterBytes = MaxParameters * sizeof(uint64_t);
- static const uint32_t LLDB_FLAG = 'LLDB';
+ static const uint32_t LLDB_FLAG = 0x4C4C4442; // ASCII for 'LLDB'
support::ulittle32_t ExceptionCode;
support::ulittle32_t ExceptionFlags;
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index b8f8073..12ff36c 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -2301,10 +2301,11 @@ public:
Align getEVTAlign(EVT MemoryVT) const;
/// Test whether the given value is a constant int or similar node.
- SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
+ bool isConstantIntBuildVectorOrConstantInt(SDValue N,
+ bool AllowOpaques = true) const;
/// Test whether the given value is a constant FP or similar node.
- SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const ;
+ bool isConstantFPBuildVectorOrConstantFP(SDValue N) const;
/// \returns true if \p N is any kind of constant or build_vector of
/// constants, int or float. If a vector, it may not necessarily be a splat.
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 9882d85..97de019 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -280,7 +280,11 @@ public:
/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. For most targets this is true only if the function
/// has variable sized allocas or if frame pointer elimination is disabled.
- virtual bool hasFP(const MachineFunction &MF) const = 0;
+ /// For all targets, this is false if the function has the naked attribute
+ /// since there is no prologue to set up the frame pointer.
+ bool hasFP(const MachineFunction &MF) const {
+ return !MF.getFunction().hasFnAttribute(Attribute::Naked) && hasFPImpl(MF);
+ }
/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
@@ -477,6 +481,9 @@ public:
/// targets can emit remarks based on the final frame layout.
virtual void emitRemarks(const MachineFunction &MF,
MachineOptimizationRemarkEmitter *ORE) const {};
+
+protected:
+ virtual bool hasFPImpl(const MachineFunction &MF) const = 0;
};
} // End llvm namespace
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 5ab31a6..61615cb 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5567,9 +5567,7 @@ public:
/// If this function returns true, SelectionDAGBuilder emits a
/// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
- virtual bool useLoadStackGuardNode() const {
- return false;
- }
+ virtual bool useLoadStackGuardNode(const Module &M) const { return false; }
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
const SDLoc &DL) const {
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index f2f0981..f784c37 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -1170,7 +1170,7 @@ def OMP_Workshare : Directive<"workshare"> {
let category = CA_Executable;
}
def OMP_EndWorkshare : Directive<"end workshare"> {
- let allowedClauses = [
+ let allowedOnceClauses = [
VersionedClause<OMPC_NoWait>,
];
let leafConstructs = OMP_Workshare.leafConstructs;
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 715f2cc..92226a6 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1115,7 +1115,7 @@ def int_amdgcn_s_buffer_load : DefaultAttrsIntrinsic <
// it is const 0. A struct intrinsic with constant 0 index is different to the
// corresponding raw intrinsic on gfx9+ because the behavior of bound checking
// and swizzling changes depending on whether idxen is set in the instruction.
-// These instrinsics also keep the offset and soffset arguments separate as
+// These intrinsics also keep the offset and soffset arguments separate as
// they behave differently in bounds checking and swizzling.
// The versions of these intrinsics that take <4 x i32> arguments are deprecated
diff --git a/llvm/include/llvm/Object/OffloadBinary.h b/llvm/include/llvm/Object/OffloadBinary.h
index 13383d5..c02aec8 100644
--- a/llvm/include/llvm/Object/OffloadBinary.h
+++ b/llvm/include/llvm/Object/OffloadBinary.h
@@ -165,7 +165,8 @@ public:
/// Make a deep copy of this offloading file.
OffloadFile copy() const {
std::unique_ptr<MemoryBuffer> Buffer = MemoryBuffer::getMemBufferCopy(
- getBinary()->getMemoryBufferRef().getBuffer());
+ getBinary()->getMemoryBufferRef().getBuffer(),
+ getBinary()->getMemoryBufferRef().getBufferIdentifier());
// This parsing should never fail because it has already been parsed.
auto NewBinaryOrErr = OffloadBinary::create(*Buffer);
diff --git a/llvm/include/llvm/SandboxIR/Operator.h b/llvm/include/llvm/SandboxIR/Operator.h
new file mode 100644
index 0000000..f19c54c
--- /dev/null
+++ b/llvm/include/llvm/SandboxIR/Operator.h
@@ -0,0 +1,99 @@
+//===- Operator.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SANDBOXIR_OPERATOR_H
+#define LLVM_SANDBOXIR_OPERATOR_H
+
+#include "llvm/IR/Operator.h"
+#include "llvm/SandboxIR/Instruction.h"
+#include "llvm/SandboxIR/User.h"
+
+namespace llvm::sandboxir {
+
+class Operator : public User {
+public:
+ // The Operator class is intended to be used as a utility, and is never itself
+ // instantiated.
+ Operator() = delete;
+ void *operator new(size_t s) = delete;
+
+ static bool classof(const Instruction *) { return true; }
+ static bool classof(const ConstantExpr *) { return true; }
+ static bool classof(const Value *From) {
+ return llvm::Operator::classof(From->Val);
+ }
+ bool hasPoisonGeneratingFlags() const {
+ return cast<llvm::Operator>(Val)->hasPoisonGeneratingFlags();
+ }
+};
+
+class OverflowingBinaryOperator : public Operator {
+public:
+ bool hasNoUnsignedWrap() const {
+ return cast<llvm::OverflowingBinaryOperator>(Val)->hasNoUnsignedWrap();
+ }
+ bool hasNoSignedWrap() const {
+ return cast<llvm::OverflowingBinaryOperator>(Val)->hasNoSignedWrap();
+ }
+ unsigned getNoWrapKind() const {
+ return cast<llvm::OverflowingBinaryOperator>(Val)->getNoWrapKind();
+ }
+ static bool classof(const Instruction *From) {
+ return llvm::OverflowingBinaryOperator::classof(
+ cast<llvm::Instruction>(From->Val));
+ }
+ static bool classof(const ConstantExpr *From) {
+ return llvm::OverflowingBinaryOperator::classof(
+ cast<llvm::ConstantExpr>(From->Val));
+ }
+ static bool classof(const Value *From) {
+ return llvm::OverflowingBinaryOperator::classof(From->Val);
+ }
+};
+
+class FPMathOperator : public Operator {
+public:
+ bool isFast() const { return cast<llvm::FPMathOperator>(Val)->isFast(); }
+ bool hasAllowReassoc() const {
+ return cast<llvm::FPMathOperator>(Val)->hasAllowReassoc();
+ }
+ bool hasNoNaNs() const {
+ return cast<llvm::FPMathOperator>(Val)->hasNoNaNs();
+ }
+ bool hasNoInfs() const {
+ return cast<llvm::FPMathOperator>(Val)->hasNoInfs();
+ }
+ bool hasNoSignedZeros() const {
+ return cast<llvm::FPMathOperator>(Val)->hasNoSignedZeros();
+ }
+ bool hasAllowReciprocal() const {
+ return cast<llvm::FPMathOperator>(Val)->hasAllowReciprocal();
+ }
+ bool hasAllowContract() const {
+ return cast<llvm::FPMathOperator>(Val)->hasAllowContract();
+ }
+ bool hasApproxFunc() const {
+ return cast<llvm::FPMathOperator>(Val)->hasApproxFunc();
+ }
+ FastMathFlags getFastMathFlags() const {
+ return cast<llvm::FPMathOperator>(Val)->getFastMathFlags();
+ }
+ float getFPAccuracy() const {
+ return cast<llvm::FPMathOperator>(Val)->getFPAccuracy();
+ }
+ static bool isSupportedFloatingPointType(Type *Ty) {
+ return llvm::FPMathOperator::isSupportedFloatingPointType(Ty->LLVMTy);
+ }
+ static bool classof(const Value *V) {
+ return llvm::FPMathOperator::classof(V->Val);
+ }
+};
+
+} // namespace llvm::sandboxir
+
+#endif // LLVM_SANDBOXIR_OPERATOR_H
diff --git a/llvm/include/llvm/SandboxIR/Type.h b/llvm/include/llvm/SandboxIR/Type.h
index 8094f66..9d1db11 100644
--- a/llvm/include/llvm/SandboxIR/Type.h
+++ b/llvm/include/llvm/SandboxIR/Type.h
@@ -33,12 +33,13 @@ class ArrayType;
class StructType;
class TargetExtType;
class Module;
+class FPMathOperator;
#define DEF_INSTR(ID, OPCODE, CLASS) class CLASS;
#define DEF_CONST(ID, CLASS) class CLASS;
#include "llvm/SandboxIR/Values.def"
-/// Just like llvm::Type these are immutable, unique, never get freed and can
-/// only be created via static factory methods.
+/// Just like llvm::Type these are immutable, unique, never get freed and
+/// can only be created via static factory methods.
class Type {
protected:
llvm::Type *LLVMTy;
@@ -61,6 +62,7 @@ protected:
friend class Utils; // for LLVMTy
friend class TargetExtType; // For LLVMTy.
friend class Module; // For LLVMTy.
+ friend class FPMathOperator; // For LLVMTy.
// Friend all instruction classes because `create()` functions use LLVMTy.
#define DEF_INSTR(ID, OPCODE, CLASS) friend class CLASS;
diff --git a/llvm/include/llvm/SandboxIR/Value.h b/llvm/include/llvm/SandboxIR/Value.h
index 3509f2a..243195f 100644
--- a/llvm/include/llvm/SandboxIR/Value.h
+++ b/llvm/include/llvm/SandboxIR/Value.h
@@ -28,6 +28,9 @@ class Module;
class UnaryInstruction;
class CmpInst;
class IntrinsicInst;
+class Operator;
+class OverflowingBinaryOperator;
+class FPMathOperator;
/// Iterator for the `Use` edges of a Value's users.
/// \Returns a `Use` when dereferenced.
@@ -158,6 +161,9 @@ protected:
friend class Utils; // For `Val`.
friend class Module; // For `Val`.
friend class IntrinsicInst; // For `Val`.
+ friend class Operator; // For `Val`.
+ friend class OverflowingBinaryOperator; // For `Val`.
+ friend class FPMathOperator; // For `Val`.
// Region needs to manipulate metadata in the underlying LLVM Value, we don't
// expose metadata in sandboxir.
friend class Region;
diff --git a/llvm/include/llvm/Support/AutoConvert.h b/llvm/include/llvm/Support/AutoConvert.h
index 6f45c46..65ac576 100644
--- a/llvm/include/llvm/Support/AutoConvert.h
+++ b/llvm/include/llvm/Support/AutoConvert.h
@@ -1,4 +1,4 @@
-//===- AutoConvert.h - Auto conversion between ASCII/EBCDIC -----*- C++ -*-===//
+/*===- AutoConvert.h - Auto conversion between ASCII/EBCDIC -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -9,7 +9,7 @@
// This file contains functions used for auto conversion between
// ASCII/EBCDIC codepages specific to z/OS.
//
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===*/
#ifndef LLVM_SUPPORT_AUTOCONVERT_H
#define LLVM_SUPPORT_AUTOCONVERT_H
@@ -18,7 +18,7 @@
#include <_Ccsid.h>
#ifdef __cplusplus
#include <system_error>
-#endif // __cplusplus
+#endif /* __cplusplus */
#define CCSID_IBM_1047 1047
#define CCSID_UTF_8 1208
@@ -26,35 +26,37 @@
#ifdef __cplusplus
extern "C" {
-#endif // __cplusplus
+#endif /* __cplusplus */
int enablezOSAutoConversion(int FD);
int disablezOSAutoConversion(int FD);
int restorezOSStdHandleAutoConversion(int FD);
#ifdef __cplusplus
}
-#endif // __cplusplus
+#endif /* __cplusplus */
#ifdef __cplusplus
namespace llvm {
-/// \brief Disable the z/OS enhanced ASCII auto-conversion for the file
-/// descriptor.
+/** \brief Disable the z/OS enhanced ASCII auto-conversion for the file
+ * descriptor.
+ */
std::error_code disablezOSAutoConversion(int FD);
-/// \brief Query the z/OS enhanced ASCII auto-conversion status of a file
-/// descriptor and force the conversion if the file is not tagged with a
-/// codepage.
+/** \brief Query the z/OS enhanced ASCII auto-conversion status of a file
+ * descriptor and force the conversion if the file is not tagged with a
+ * codepage.
+ */
std::error_code enablezOSAutoConversion(int FD);
-/// Restore the z/OS enhanced ASCII auto-conversion for the std handle.
+/** Restore the z/OS enhanced ASCII auto-conversion for the std handle. */
std::error_code restorezOSStdHandleAutoConversion(int FD);
-/// \brief Set the tag information for a file descriptor.
+/** \brief Set the tag information for a file descriptor. */
std::error_code setzOSFileTag(int FD, int CCSID, bool Text);
-} // namespace llvm
-#endif // __cplusplus
+} /* namespace llvm */
+#endif /* __cplusplus */
-#endif // __MVS__
+#endif /* __MVS__ */
-#endif // LLVM_SUPPORT_AUTOCONVERT_H
+#endif /* LLVM_SUPPORT_AUTOCONVERT_H */
diff --git a/llvm/include/llvm/TableGen/Error.h b/llvm/include/llvm/TableGen/Error.h
index 512249b..b963dcb 100644
--- a/llvm/include/llvm/TableGen/Error.h
+++ b/llvm/include/llvm/TableGen/Error.h
@@ -49,8 +49,8 @@ void PrintError(const RecordVal *RecVal, const Twine &Msg);
[[noreturn]] void PrintFatalError(function_ref<void(raw_ostream &OS)> PrintMsg);
// Returns true if the assert failed.
-bool CheckAssert(SMLoc Loc, Init *Condition, Init *Message);
-void dumpMessage(SMLoc Loc, Init *Message);
+bool CheckAssert(SMLoc Loc, const Init *Condition, const Init *Message);
+void dumpMessage(SMLoc Loc, const Init *Message);
extern SourceMgr SrcMgr;
extern unsigned ErrorsPrinted;
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index f856ff4..63267b7 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -374,25 +374,26 @@ public:
/// If this value is convertible to type \p Ty, return a value whose
/// type is \p Ty, generating a !cast operation if required.
/// Otherwise, return null.
- virtual Init *getCastTo(const RecTy *Ty) const = 0;
+ virtual const Init *getCastTo(const RecTy *Ty) const = 0;
/// Convert to a value whose type is \p Ty, or return null if this
/// is not possible. This can happen if the value's type is convertible
/// to \p Ty, but there are unresolved references.
- virtual Init *convertInitializerTo(const RecTy *Ty) const = 0;
+ virtual const Init *convertInitializerTo(const RecTy *Ty) const = 0;
/// This function is used to implement the bit range
/// selection operator. Given a value, it selects the specified bits,
/// returning them as a new \p Init of type \p bits. If it is not legal
/// to use the bit selection operator on this value, null is returned.
- virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
+ virtual const Init *
+ convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
return nullptr;
}
/// This function is used to implement the FieldInit class.
/// Implementors of this method should return the type of the named
/// field if they are of type record.
- virtual const RecTy *getFieldType(StringInit *FieldName) const {
+ virtual const RecTy *getFieldType(const StringInit *FieldName) const {
return nullptr;
}
@@ -400,12 +401,12 @@ public:
/// variables which may not be defined at the time the expression is formed.
/// If a value is set for the variable later, this method will be called on
/// users of the value to allow the value to propagate out.
- virtual Init *resolveReferences(Resolver &R) const {
+ virtual const Init *resolveReferences(Resolver &R) const {
return const_cast<Init *>(this);
}
/// Get the \p Init value of the specified bit.
- virtual Init *getBit(unsigned Bit) const = 0;
+ virtual const Init *getBit(unsigned Bit) const = 0;
};
inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
@@ -436,15 +437,16 @@ public:
/// Get the record keeper that initialized this Init.
RecordKeeper &getRecordKeeper() const { return ValueTy->getRecordKeeper(); }
- Init *getCastTo(const RecTy *Ty) const override;
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *getCastTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
- Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+ const Init *
+ convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
/// This method is used to implement the FieldInit class.
/// Implementors of this method should return the type of the named field if
/// they are of type record.
- const RecTy *getFieldType(StringInit *FieldName) const override;
+ const RecTy *getFieldType(const StringInit *FieldName) const override;
};
/// '?' - Represents an uninitialized value.
@@ -470,10 +472,10 @@ public:
/// Get the record keeper that initialized this Init.
RecordKeeper &getRecordKeeper() const { return RK; }
- Init *getCastTo(const RecTy *Ty) const override;
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *getCastTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
return const_cast<UnsetInit*>(this);
}
@@ -487,7 +489,7 @@ public:
};
// Represent an argument.
-using ArgAuxType = std::variant<unsigned, Init *>;
+using ArgAuxType = std::variant<unsigned, const Init *>;
class ArgumentInit : public Init, public FoldingSetNode {
public:
enum Kind {
@@ -496,11 +498,11 @@ public:
};
private:
- Init *Value;
+ const Init *Value;
ArgAuxType Aux;
protected:
- explicit ArgumentInit(Init *Value, ArgAuxType Aux)
+ explicit ArgumentInit(const Init *Value, ArgAuxType Aux)
: Init(IK_ArgumentInit), Value(Value), Aux(Aux) {}
public:
@@ -511,25 +513,27 @@ public:
RecordKeeper &getRecordKeeper() const { return Value->getRecordKeeper(); }
- static ArgumentInit *get(Init *Value, ArgAuxType Aux);
+ static const ArgumentInit *get(const Init *Value, ArgAuxType Aux);
bool isPositional() const { return Aux.index() == Positional; }
bool isNamed() const { return Aux.index() == Named; }
- Init *getValue() const { return Value; }
+ const Init *getValue() const { return Value; }
unsigned getIndex() const {
assert(isPositional() && "Should be positional!");
return std::get<Positional>(Aux);
}
- Init *getName() const {
+ const Init *getName() const {
assert(isNamed() && "Should be named!");
return std::get<Named>(Aux);
}
- ArgumentInit *cloneWithValue(Init *Value) const { return get(Value, Aux); }
+ const ArgumentInit *cloneWithValue(const Init *Value) const {
+ return get(Value, Aux);
+ }
void Profile(FoldingSetNodeID &ID) const;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
std::string getAsString() const override {
if (isPositional())
return utostr(getIndex()) + ": " + Value->getAsString();
@@ -541,11 +545,11 @@ public:
bool isComplete() const override { return false; }
bool isConcrete() const override { return false; }
- Init *getBit(unsigned Bit) const override { return Value->getBit(Bit); }
- Init *getCastTo(const RecTy *Ty) const override {
+ const Init *getBit(unsigned Bit) const override { return Value->getBit(Bit); }
+ const Init *getCastTo(const RecTy *Ty) const override {
return Value->getCastTo(Ty);
}
- Init *convertInitializerTo(const RecTy *Ty) const override {
+ const Init *convertInitializerTo(const RecTy *Ty) const override {
return Value->convertInitializerTo(Ty);
}
};
@@ -571,9 +575,9 @@ public:
bool getValue() const { return Value; }
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
assert(Bit < 1 && "Bit index out of range!");
return const_cast<BitInit*>(this);
}
@@ -584,8 +588,9 @@ public:
/// '{ a, b, c }' - Represents an initializer for a BitsRecTy value.
/// It contains a vector of bits, whose size is determined by the type.
-class BitsInit final : public TypedInit, public FoldingSetNode,
- public TrailingObjects<BitsInit, Init *> {
+class BitsInit final : public TypedInit,
+ public FoldingSetNode,
+ public TrailingObjects<BitsInit, const Init *> {
unsigned NumBits;
BitsInit(RecordKeeper &RK, unsigned N)
@@ -602,14 +607,15 @@ public:
return I->getKind() == IK_BitsInit;
}
- static BitsInit *get(RecordKeeper &RK, ArrayRef<Init *> Range);
+ static BitsInit *get(RecordKeeper &RK, ArrayRef<const Init *> Range);
void Profile(FoldingSetNodeID &ID) const;
unsigned getNumBits() const { return NumBits; }
- Init *convertInitializerTo(const RecTy *Ty) const override;
- Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *
+ convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
std::optional<int64_t> convertInitializerToInt() const;
bool isComplete() const override {
@@ -627,11 +633,11 @@ public:
bool isConcrete() const override;
std::string getAsString() const override;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
assert(Bit < NumBits && "Bit index out of range!");
- return getTrailingObjects<Init *>()[Bit];
+ return getTrailingObjects<const Init *>()[Bit];
}
};
@@ -654,13 +660,14 @@ public:
int64_t getValue() const { return Value; }
- Init *convertInitializerTo(const RecTy *Ty) const override;
- Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *
+ convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
bool isConcrete() const override { return true; }
std::string getAsString() const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
return BitInit::get(getRecordKeeper(), (Value & (1ULL << Bit)) != 0);
}
};
@@ -684,13 +691,13 @@ public:
unsigned getValue() const { return Value; }
- StringInit *getNameInit() const;
+ const StringInit *getNameInit() const;
std::string getAsString() const override;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off string");
}
};
@@ -718,8 +725,8 @@ public:
return I->getKind() == IK_StringInit;
}
- static StringInit *get(RecordKeeper &RK, StringRef,
- StringFormat Fmt = SF_String);
+ static const StringInit *get(RecordKeeper &RK, StringRef,
+ StringFormat Fmt = SF_String);
static StringFormat determineFormat(StringFormat Fmt1, StringFormat Fmt2) {
return (Fmt1 == SF_Code || Fmt2 == SF_Code) ? SF_Code : SF_String;
@@ -729,7 +736,7 @@ public:
StringFormat getFormat() const { return Format; }
bool hasCodeFormat() const { return Format == SF_Code; }
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
bool isConcrete() const override { return true; }
@@ -744,19 +751,20 @@ public:
return std::string(Value);
}
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off string");
}
};
/// [AL, AH, CL] - Represent a list of defs
///
-class ListInit final : public TypedInit, public FoldingSetNode,
- public TrailingObjects<ListInit, Init *> {
+class ListInit final : public TypedInit,
+ public FoldingSetNode,
+ public TrailingObjects<ListInit, const Init *> {
unsigned NumValues;
public:
- using const_iterator = Init *const *;
+ using const_iterator = const Init *const *;
private:
explicit ListInit(unsigned N, const RecTy *EltTy)
@@ -772,13 +780,13 @@ public:
static bool classof(const Init *I) {
return I->getKind() == IK_ListInit;
}
- static ListInit *get(ArrayRef<Init *> Range, const RecTy *EltTy);
+ static const ListInit *get(ArrayRef<const Init *> Range, const RecTy *EltTy);
void Profile(FoldingSetNodeID &ID) const;
- Init *getElement(unsigned i) const {
+ const Init *getElement(unsigned i) const {
assert(i < NumValues && "List element index out of range!");
- return getTrailingObjects<Init *>()[i];
+ return getTrailingObjects<const Init *>()[i];
}
const RecTy *getElementType() const {
return cast<ListRecTy>(getType())->getElementType();
@@ -786,30 +794,30 @@ public:
const Record *getElementAsRecord(unsigned i) const;
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
/// This method is used by classes that refer to other
/// variables which may not be defined at the time they expression is formed.
/// If a value is set for the variable later, this method will be called on
/// users of the value to allow the value to propagate out.
///
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
bool isComplete() const override;
bool isConcrete() const override;
std::string getAsString() const override;
- ArrayRef<Init*> getValues() const {
- return ArrayRef(getTrailingObjects<Init *>(), NumValues);
+ ArrayRef<const Init *> getValues() const {
+ return ArrayRef(getTrailingObjects<const Init *>(), NumValues);
}
- const_iterator begin() const { return getTrailingObjects<Init *>(); }
+ const_iterator begin() const { return getTrailingObjects<const Init *>(); }
const_iterator end () const { return begin() + NumValues; }
size_t size () const { return NumValues; }
bool empty() const { return NumValues == 0; }
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off list");
}
};
@@ -831,12 +839,12 @@ public:
}
// Clone - Clone this operator, replacing arguments with the new list
- virtual OpInit *clone(ArrayRef<Init *> Operands) const = 0;
+ virtual const OpInit *clone(ArrayRef<const Init *> Operands) const = 0;
virtual unsigned getNumOperands() const = 0;
- virtual Init *getOperand(unsigned i) const = 0;
+ virtual const Init *getOperand(unsigned i) const = 0;
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
};
/// !op (X) - Transform an init.
@@ -859,9 +867,9 @@ public:
};
private:
- Init *LHS;
+ const Init *LHS;
- UnOpInit(UnaryOp opc, Init *lhs, const RecTy *Type)
+ UnOpInit(UnaryOp opc, const Init *lhs, const RecTy *Type)
: OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}
public:
@@ -872,12 +880,12 @@ public:
return I->getKind() == IK_UnOpInit;
}
- static UnOpInit *get(UnaryOp opc, Init *lhs, const RecTy *Type);
+ static const UnOpInit *get(UnaryOp opc, const Init *lhs, const RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(ArrayRef<Init *> Operands) const override {
+ const OpInit *clone(ArrayRef<const Init *> Operands) const override {
assert(Operands.size() == 1 &&
"Wrong number of operands for unary operation");
return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
@@ -885,19 +893,19 @@ public:
unsigned getNumOperands() const override { return 1; }
- Init *getOperand(unsigned i) const override {
+ const Init *getOperand(unsigned i) const override {
assert(i == 0 && "Invalid operand id for unary operator");
return getOperand();
}
UnaryOp getOpcode() const { return (UnaryOp)Opc; }
- Init *getOperand() const { return LHS; }
+ const Init *getOperand() const { return LHS; }
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, bool IsFinal = false) const;
+ const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
std::string getAsString() const override;
};
@@ -937,9 +945,9 @@ public:
};
private:
- Init *LHS, *RHS;
+ const Init *LHS, *RHS;
- BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, const RecTy *Type)
+ BinOpInit(BinaryOp opc, const Init *lhs, const Init *rhs, const RecTy *Type)
: OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}
public:
@@ -950,21 +958,22 @@ public:
return I->getKind() == IK_BinOpInit;
}
- static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs, const RecTy *Type);
- static Init *getStrConcat(Init *lhs, Init *rhs);
- static Init *getListConcat(TypedInit *lhs, Init *rhs);
+ static const BinOpInit *get(BinaryOp opc, const Init *lhs, const Init *rhs,
+ const RecTy *Type);
+ static const Init *getStrConcat(const Init *lhs, const Init *rhs);
+ static const Init *getListConcat(const TypedInit *lhs, const Init *rhs);
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(ArrayRef<Init *> Operands) const override {
+ const OpInit *clone(ArrayRef<const Init *> Operands) const override {
assert(Operands.size() == 2 &&
"Wrong number of operands for binary operation");
return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
}
unsigned getNumOperands() const override { return 2; }
- Init *getOperand(unsigned i) const override {
+ const Init *getOperand(unsigned i) const override {
switch (i) {
default: llvm_unreachable("Invalid operand id for binary operator");
case 0: return getLHS();
@@ -973,16 +982,17 @@ public:
}
BinaryOp getOpcode() const { return (BinaryOp)Opc; }
- Init *getLHS() const { return LHS; }
- Init *getRHS() const { return RHS; }
+ const Init *getLHS() const { return LHS; }
+ const Init *getRHS() const { return RHS; }
- std::optional<bool> CompareInit(unsigned Opc, Init *LHS, Init *RHS) const;
+ std::optional<bool> CompareInit(unsigned Opc, const Init *LHS,
+ const Init *RHS) const;
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec) const;
+ const Init *Fold(const Record *CurRec) const;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
std::string getAsString() const override;
};
@@ -1004,9 +1014,10 @@ public:
};
private:
- Init *LHS, *MHS, *RHS;
+ const Init *LHS, *MHS, *RHS;
- TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs, const RecTy *Type)
+ TernOpInit(TernaryOp opc, const Init *lhs, const Init *mhs, const Init *rhs,
+ const RecTy *Type)
: OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
public:
@@ -1017,13 +1028,13 @@ public:
return I->getKind() == IK_TernOpInit;
}
- static TernOpInit *get(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
- const RecTy *Type);
+ static const TernOpInit *get(TernaryOp opc, const Init *lhs, const Init *mhs,
+ const Init *rhs, const RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(ArrayRef<Init *> Operands) const override {
+ const OpInit *clone(ArrayRef<const Init *> Operands) const override {
assert(Operands.size() == 3 &&
"Wrong number of operands for ternary operation");
return TernOpInit::get(getOpcode(), Operands[0], Operands[1], Operands[2],
@@ -1031,7 +1042,7 @@ public:
}
unsigned getNumOperands() const override { return 3; }
- Init *getOperand(unsigned i) const override {
+ const Init *getOperand(unsigned i) const override {
switch (i) {
default: llvm_unreachable("Invalid operand id for ternary operator");
case 0: return getLHS();
@@ -1041,19 +1052,19 @@ public:
}
TernaryOp getOpcode() const { return (TernaryOp)Opc; }
- Init *getLHS() const { return LHS; }
- Init *getMHS() const { return MHS; }
- Init *getRHS() const { return RHS; }
+ const Init *getLHS() const { return LHS; }
+ const Init *getMHS() const { return MHS; }
+ const Init *getRHS() const { return RHS; }
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec) const;
+ const Init *Fold(const Record *CurRec) const;
bool isComplete() const override {
return LHS->isComplete() && MHS->isComplete() && RHS->isComplete();
}
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
std::string getAsString() const override;
};
@@ -1061,8 +1072,9 @@ public:
/// !cond(condition_1: value1, ... , condition_n: value)
/// Selects the first value for which condition is true.
/// Otherwise reports an error.
-class CondOpInit final : public TypedInit, public FoldingSetNode,
- public TrailingObjects<CondOpInit, Init *> {
+class CondOpInit final : public TypedInit,
+ public FoldingSetNode,
+ public TrailingObjects<CondOpInit, const Init *> {
unsigned NumConds;
const RecTy *ValType;
@@ -1081,8 +1093,8 @@ public:
return I->getKind() == IK_CondOpInit;
}
- static CondOpInit *get(ArrayRef<Init *> C, ArrayRef<Init *> V,
- const RecTy *Type);
+ static const CondOpInit *get(ArrayRef<const Init *> C,
+ ArrayRef<const Init *> V, const RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
@@ -1090,34 +1102,34 @@ public:
unsigned getNumConds() const { return NumConds; }
- Init *getCond(unsigned Num) const {
+ const Init *getCond(unsigned Num) const {
assert(Num < NumConds && "Condition number out of range!");
- return getTrailingObjects<Init *>()[Num];
+ return getTrailingObjects<const Init *>()[Num];
}
- Init *getVal(unsigned Num) const {
+ const Init *getVal(unsigned Num) const {
assert(Num < NumConds && "Val number out of range!");
- return getTrailingObjects<Init *>()[Num+NumConds];
+ return getTrailingObjects<const Init *>()[Num + NumConds];
}
- ArrayRef<Init *> getConds() const {
- return ArrayRef(getTrailingObjects<Init *>(), NumConds);
+ ArrayRef<const Init *> getConds() const {
+ return ArrayRef(getTrailingObjects<const Init *>(), NumConds);
}
- ArrayRef<Init *> getVals() const {
- return ArrayRef(getTrailingObjects<Init *>() + NumConds, NumConds);
+ ArrayRef<const Init *> getVals() const {
+ return ArrayRef(getTrailingObjects<const Init *>() + NumConds, NumConds);
}
- Init *Fold(Record *CurRec) const;
+ const Init *Fold(const Record *CurRec) const;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
bool isConcrete() const override;
bool isComplete() const override;
std::string getAsString() const override;
- using const_case_iterator = SmallVectorImpl<Init*>::const_iterator;
- using const_val_iterator = SmallVectorImpl<Init*>::const_iterator;
+ using const_case_iterator = SmallVectorImpl<const Init *>::const_iterator;
+ using const_val_iterator = SmallVectorImpl<const Init *>::const_iterator;
inline const_case_iterator arg_begin() const { return getConds().begin(); }
inline const_case_iterator arg_end () const { return getConds().end(); }
@@ -1131,20 +1143,16 @@ public:
inline size_t val_size () const { return NumConds; }
inline bool val_empty() const { return NumConds == 0; }
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
};
/// !foldl (a, b, expr, start, lst) - Fold over a list.
class FoldOpInit : public TypedInit, public FoldingSetNode {
private:
- Init *Start;
- Init *List;
- Init *A;
- Init *B;
- Init *Expr;
+ const Init *Start, *List, *A, *B, *Expr;
- FoldOpInit(Init *Start, Init *List, Init *A, Init *B, Init *Expr,
- const RecTy *Type)
+ FoldOpInit(const Init *Start, const Init *List, const Init *A, const Init *B,
+ const Init *Expr, const RecTy *Type)
: TypedInit(IK_FoldOpInit, Type), Start(Start), List(List), A(A), B(B),
Expr(Expr) {}
@@ -1154,20 +1162,21 @@ public:
static bool classof(const Init *I) { return I->getKind() == IK_FoldOpInit; }
- static FoldOpInit *get(Init *Start, Init *List, Init *A, Init *B, Init *Expr,
- const RecTy *Type);
+ static const FoldOpInit *get(const Init *Start, const Init *List,
+ const Init *A, const Init *B, const Init *Expr,
+ const RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec) const;
+ const Init *Fold(const Record *CurRec) const;
bool isComplete() const override { return false; }
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
std::string getAsString() const override;
};
@@ -1176,9 +1185,9 @@ public:
class IsAOpInit : public TypedInit, public FoldingSetNode {
private:
const RecTy *CheckType;
- Init *Expr;
+ const Init *Expr;
- IsAOpInit(const RecTy *CheckType, Init *Expr)
+ IsAOpInit(const RecTy *CheckType, const Init *Expr)
: TypedInit(IK_IsAOpInit, IntRecTy::get(CheckType->getRecordKeeper())),
CheckType(CheckType), Expr(Expr) {}
@@ -1188,19 +1197,19 @@ public:
static bool classof(const Init *I) { return I->getKind() == IK_IsAOpInit; }
- static IsAOpInit *get(const RecTy *CheckType, Init *Expr);
+ static const IsAOpInit *get(const RecTy *CheckType, const Init *Expr);
void Profile(FoldingSetNodeID &ID) const;
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold() const;
+ const Init *Fold() const;
bool isComplete() const override { return false; }
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
std::string getAsString() const override;
};
@@ -1210,9 +1219,9 @@ public:
class ExistsOpInit : public TypedInit, public FoldingSetNode {
private:
const RecTy *CheckType;
- Init *Expr;
+ const Init *Expr;
- ExistsOpInit(const RecTy *CheckType, Init *Expr)
+ ExistsOpInit(const RecTy *CheckType, const Init *Expr)
: TypedInit(IK_ExistsOpInit, IntRecTy::get(CheckType->getRecordKeeper())),
CheckType(CheckType), Expr(Expr) {}
@@ -1222,28 +1231,28 @@ public:
static bool classof(const Init *I) { return I->getKind() == IK_ExistsOpInit; }
- static ExistsOpInit *get(const RecTy *CheckType, Init *Expr);
+ static const ExistsOpInit *get(const RecTy *CheckType, const Init *Expr);
void Profile(FoldingSetNodeID &ID) const;
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, bool IsFinal = false) const;
+ const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
bool isComplete() const override { return false; }
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
std::string getAsString() const override;
};
/// 'Opcode' - Represent a reference to an entire variable object.
class VarInit : public TypedInit {
- Init *VarName;
+ const Init *VarName;
- explicit VarInit(Init *VN, const RecTy *T)
+ explicit VarInit(const Init *VN, const RecTy *T)
: TypedInit(IK_VarInit, T), VarName(VN) {}
public:
@@ -1254,11 +1263,11 @@ public:
return I->getKind() == IK_VarInit;
}
- static VarInit *get(StringRef VN, const RecTy *T);
- static VarInit *get(Init *VN, const RecTy *T);
+ static const VarInit *get(StringRef VN, const RecTy *T);
+ static const VarInit *get(const Init *VN, const RecTy *T);
StringRef getName() const;
- Init *getNameInit() const { return VarName; }
+ const Init *getNameInit() const { return VarName; }
std::string getNameInitAsString() const {
return getNameInit()->getAsUnquotedString();
@@ -1269,19 +1278,19 @@ public:
/// If a value is set for the variable later, this method will be called on
/// users of the value to allow the value to propagate out.
///
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
std::string getAsString() const override { return std::string(getName()); }
};
/// Opcode{0} - Represent access to one bit of a variable or field.
class VarBitInit final : public TypedInit {
- TypedInit *TI;
+ const TypedInit *TI;
unsigned Bit;
- VarBitInit(TypedInit *T, unsigned B)
+ VarBitInit(const TypedInit *T, unsigned B)
: TypedInit(IK_VarBitInit, BitRecTy::get(T->getRecordKeeper())), TI(T),
Bit(B) {
assert(T->getType() &&
@@ -1299,15 +1308,15 @@ public:
return I->getKind() == IK_VarBitInit;
}
- static VarBitInit *get(TypedInit *T, unsigned B);
+ static const VarBitInit *get(const TypedInit *T, unsigned B);
- Init *getBitVar() const { return TI; }
+ const Init *getBitVar() const { return TI; }
unsigned getBitNum() const { return Bit; }
std::string getAsString() const override;
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
- Init *getBit(unsigned B) const override {
+ const Init *getBit(unsigned B) const override {
assert(B < 1 && "Bit index out of range!");
return const_cast<VarBitInit*>(this);
}
@@ -1329,33 +1338,34 @@ public:
return I->getKind() == IK_DefInit;
}
- Init *convertInitializerTo(const RecTy *Ty) const override;
+ const Init *convertInitializerTo(const RecTy *Ty) const override;
const Record *getDef() const { return Def; }
- const RecTy *getFieldType(StringInit *FieldName) const override;
+ const RecTy *getFieldType(const StringInit *FieldName) const override;
bool isConcrete() const override { return true; }
std::string getAsString() const override;
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off def");
}
};
/// classname<targs...> - Represent an uninstantiated anonymous class
/// instantiation.
-class VarDefInit final : public TypedInit,
- public FoldingSetNode,
- public TrailingObjects<VarDefInit, ArgumentInit *> {
+class VarDefInit final
+ : public TypedInit,
+ public FoldingSetNode,
+ public TrailingObjects<VarDefInit, const ArgumentInit *> {
SMLoc Loc;
Record *Class;
- DefInit *Def = nullptr; // after instantiation
+ const DefInit *Def = nullptr; // after instantiation
unsigned NumArgs;
explicit VarDefInit(SMLoc Loc, Record *Class, unsigned N);
- DefInit *instantiate();
+ const DefInit *instantiate();
public:
VarDefInit(const VarDefInit &) = delete;
@@ -1367,46 +1377,46 @@ public:
static bool classof(const Init *I) {
return I->getKind() == IK_VarDefInit;
}
- static VarDefInit *get(SMLoc Loc, Record *Class,
- ArrayRef<ArgumentInit *> Args);
+ static const VarDefInit *get(SMLoc Loc, Record *Class,
+ ArrayRef<const ArgumentInit *> Args);
void Profile(FoldingSetNodeID &ID) const;
- Init *resolveReferences(Resolver &R) const override;
- Init *Fold() const;
+ const Init *resolveReferences(Resolver &R) const override;
+ const Init *Fold() const;
std::string getAsString() const override;
- ArgumentInit *getArg(unsigned i) const {
+ const ArgumentInit *getArg(unsigned i) const {
assert(i < NumArgs && "Argument index out of range!");
- return getTrailingObjects<ArgumentInit *>()[i];
+ return getTrailingObjects<const ArgumentInit *>()[i];
}
- using const_iterator = ArgumentInit *const *;
+ using const_iterator = const ArgumentInit *const *;
const_iterator args_begin() const {
- return getTrailingObjects<ArgumentInit *>();
+ return getTrailingObjects<const ArgumentInit *>();
}
const_iterator args_end () const { return args_begin() + NumArgs; }
size_t args_size () const { return NumArgs; }
bool args_empty() const { return NumArgs == 0; }
- ArrayRef<ArgumentInit *> args() const {
+ ArrayRef<const ArgumentInit *> args() const {
return ArrayRef(args_begin(), NumArgs);
}
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off anonymous def");
}
};
/// X.Y - Represent a reference to a subfield of a variable
class FieldInit : public TypedInit {
- Init *Rec; // Record we are referring to
- StringInit *FieldName; // Field we are accessing
+ const Init *Rec; // Record we are referring to
+ const StringInit *FieldName; // Field we are accessing
- FieldInit(Init *R, StringInit *FN)
+ FieldInit(const Init *R, const StringInit *FN)
: TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
#ifndef NDEBUG
if (!getType()) {
@@ -1426,15 +1436,15 @@ public:
return I->getKind() == IK_FieldInit;
}
- static FieldInit *get(Init *R, StringInit *FN);
+ static const FieldInit *get(const Init *R, const StringInit *FN);
- Init *getRecord() const { return Rec; }
- StringInit *getFieldName() const { return FieldName; }
+ const Init *getRecord() const { return Rec; }
+ const StringInit *getFieldName() const { return FieldName; }
- Init *getBit(unsigned Bit) const override;
+ const Init *getBit(unsigned Bit) const override;
- Init *resolveReferences(Resolver &R) const override;
- Init *Fold(Record *CurRec) const;
+ const Init *resolveReferences(Resolver &R) const override;
+ const Init *Fold(const Record *CurRec) const;
bool isConcrete() const override;
std::string getAsString() const override {
@@ -1445,20 +1455,25 @@ public:
/// (v a, b) - Represent a DAG tree value. DAG inits are required
/// to have at least one value then a (possibly empty) list of arguments. Each
/// argument can have a name associated with it.
-class DagInit final : public TypedInit, public FoldingSetNode,
- public TrailingObjects<DagInit, Init *, StringInit *> {
+class DagInit final
+ : public TypedInit,
+ public FoldingSetNode,
+ public TrailingObjects<DagInit, const Init *, const StringInit *> {
friend TrailingObjects;
- Init *Val;
- StringInit *ValName;
+ const Init *Val;
+ const StringInit *ValName;
unsigned NumArgs;
unsigned NumArgNames;
- DagInit(Init *V, StringInit *VN, unsigned NumArgs, unsigned NumArgNames)
+ DagInit(const Init *V, const StringInit *VN, unsigned NumArgs,
+ unsigned NumArgNames)
: TypedInit(IK_DagInit, DagRecTy::get(V->getRecordKeeper())), Val(V),
ValName(VN), NumArgs(NumArgs), NumArgNames(NumArgNames) {}
- size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }
+ size_t numTrailingObjects(OverloadToken<const Init *>) const {
+ return NumArgs;
+ }
public:
DagInit(const DagInit &) = delete;
@@ -1468,17 +1483,19 @@ public:
return I->getKind() == IK_DagInit;
}
- static DagInit *get(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
- ArrayRef<StringInit*> NameRange);
- static DagInit *get(Init *V, StringInit *VN,
- ArrayRef<std::pair<Init*, StringInit*>> Args);
+ static const DagInit *get(const Init *V, const StringInit *VN,
+ ArrayRef<const Init *> ArgRange,
+ ArrayRef<const StringInit *> NameRange);
+ static const DagInit *
+ get(const Init *V, const StringInit *VN,
+ ArrayRef<std::pair<const Init *, const StringInit *>> Args);
void Profile(FoldingSetNodeID &ID) const;
- Init *getOperator() const { return Val; }
+ const Init *getOperator() const { return Val; }
const Record *getOperatorAsDef(ArrayRef<SMLoc> Loc) const;
- StringInit *getName() const { return ValName; }
+ const StringInit *getName() const { return ValName; }
StringRef getNameStr() const {
return ValName ? ValName->getValue() : StringRef();
@@ -1486,40 +1503,41 @@ public:
unsigned getNumArgs() const { return NumArgs; }
- Init *getArg(unsigned Num) const {
+ const Init *getArg(unsigned Num) const {
assert(Num < NumArgs && "Arg number out of range!");
- return getTrailingObjects<Init *>()[Num];
+ return getTrailingObjects<const Init *>()[Num];
}
/// This method looks up the specified argument name and returns its argument
/// number or std::nullopt if that argument name does not exist.
std::optional<unsigned> getArgNo(StringRef Name) const;
- StringInit *getArgName(unsigned Num) const {
+ const StringInit *getArgName(unsigned Num) const {
assert(Num < NumArgNames && "Arg number out of range!");
- return getTrailingObjects<StringInit *>()[Num];
+ return getTrailingObjects<const StringInit *>()[Num];
}
StringRef getArgNameStr(unsigned Num) const {
- StringInit *Init = getArgName(Num);
+ const StringInit *Init = getArgName(Num);
return Init ? Init->getValue() : StringRef();
}
- ArrayRef<Init *> getArgs() const {
- return ArrayRef(getTrailingObjects<Init *>(), NumArgs);
+ ArrayRef<const Init *> getArgs() const {
+ return ArrayRef(getTrailingObjects<const Init *>(), NumArgs);
}
- ArrayRef<StringInit *> getArgNames() const {
- return ArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
+ ArrayRef<const StringInit *> getArgNames() const {
+ return ArrayRef(getTrailingObjects<const StringInit *>(), NumArgNames);
}
- Init *resolveReferences(Resolver &R) const override;
+ const Init *resolveReferences(Resolver &R) const override;
bool isConcrete() const override;
std::string getAsString() const override;
- using const_arg_iterator = SmallVectorImpl<Init*>::const_iterator;
- using const_name_iterator = SmallVectorImpl<StringInit*>::const_iterator;
+ using const_arg_iterator = SmallVectorImpl<const Init *>::const_iterator;
+ using const_name_iterator =
+ SmallVectorImpl<const StringInit *>::const_iterator;
inline const_arg_iterator arg_begin() const { return getArgs().begin(); }
inline const_arg_iterator arg_end () const { return getArgs().end(); }
@@ -1533,7 +1551,7 @@ public:
inline size_t name_size () const { return NumArgNames; }
inline bool name_empty() const { return NumArgNames == 0; }
- Init *getBit(unsigned Bit) const override {
+ const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off dag");
}
};
@@ -1555,18 +1573,18 @@ public:
};
private:
- Init *Name;
+ const Init *Name;
SMLoc Loc; // Source location of definition of name.
PointerIntPair<const RecTy *, 2, FieldKind> TyAndKind;
- Init *Value;
+ const Init *Value;
bool IsUsed = false;
/// Reference locations to this record value.
SmallVector<SMRange> ReferenceLocs;
public:
- RecordVal(Init *N, const RecTy *T, FieldKind K);
- RecordVal(Init *N, SMLoc Loc, const RecTy *T, FieldKind K);
+ RecordVal(const Init *N, const RecTy *T, FieldKind K);
+ RecordVal(const Init *N, SMLoc Loc, const RecTy *T, FieldKind K);
/// Get the record keeper used to unique this value.
RecordKeeper &getRecordKeeper() const { return Name->getRecordKeeper(); }
@@ -1575,7 +1593,7 @@ public:
StringRef getName() const;
/// Get the name of the field as an Init.
- Init *getNameInit() const { return Name; }
+ const Init *getNameInit() const { return Name; }
/// Get the name of the field as a std::string.
std::string getNameInitAsString() const {
@@ -1602,13 +1620,13 @@ public:
std::string getPrintType() const;
/// Get the value of the field as an Init.
- Init *getValue() const { return Value; }
+ const Init *getValue() const { return Value; }
/// Set the value of the field from an Init.
- bool setValue(Init *V);
+ bool setValue(const Init *V);
/// Set the value and source location of the field.
- bool setValue(Init *V, SMLoc NewLoc);
+ bool setValue(const Init *V, SMLoc NewLoc);
/// Add a reference to this record value.
void addReferenceLoc(SMRange Loc) { ReferenceLocs.push_back(Loc); }
@@ -1636,35 +1654,35 @@ class Record {
public:
struct AssertionInfo {
SMLoc Loc;
- Init *Condition;
- Init *Message;
+ const Init *Condition;
+ const Init *Message;
// User-defined constructor to support std::make_unique(). It can be
// removed in C++20 when braced initialization is supported.
- AssertionInfo(SMLoc Loc, Init *Condition, Init *Message)
+ AssertionInfo(SMLoc Loc, const Init *Condition, const Init *Message)
: Loc(Loc), Condition(Condition), Message(Message) {}
};
struct DumpInfo {
SMLoc Loc;
- Init *Message;
+ const Init *Message;
// User-defined constructor to support std::make_unique(). It can be
// removed in C++20 when braced initialization is supported.
- DumpInfo(SMLoc Loc, Init *Message) : Loc(Loc), Message(Message) {}
+ DumpInfo(SMLoc Loc, const Init *Message) : Loc(Loc), Message(Message) {}
};
enum RecordKind { RK_Def, RK_AnonymousDef, RK_Class, RK_MultiClass };
private:
- Init *Name;
+ const Init *Name;
// Location where record was instantiated, followed by the location of
// multiclass prototypes used, and finally by the locations of references to
// this record.
SmallVector<SMLoc, 4> Locs;
SmallVector<SMLoc, 0> ForwardDeclarationLocs;
mutable SmallVector<SMRange, 0> ReferenceLocs;
- SmallVector<Init *, 0> TemplateArgs;
+ SmallVector<const Init *, 0> TemplateArgs;
SmallVector<RecordVal, 0> Values;
SmallVector<AssertionInfo, 0> Assertions;
SmallVector<DumpInfo, 0> Dumps;
@@ -1688,7 +1706,7 @@ private:
public:
// Constructs a record.
- explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
+ explicit Record(const Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
RecordKind Kind = RK_Def)
: Name(N), Locs(locs), TrackedRecords(records),
ID(getNewUID(N->getRecordKeeper())), Kind(Kind) {
@@ -1714,15 +1732,13 @@ public:
StringRef getName() const { return cast<StringInit>(Name)->getValue(); }
- Init *getNameInit() const {
- return Name;
- }
+ const Init *getNameInit() const { return Name; }
std::string getNameInitAsString() const {
return getNameInit()->getAsUnquotedString();
}
- void setName(Init *Name); // Also updates RecordKeeper.
+ void setName(const Init *Name); // Also updates RecordKeeper.
ArrayRef<SMLoc> getLoc() const { return Locs; }
void appendLoc(SMLoc Loc) { Locs.push_back(Loc); }
@@ -1752,9 +1768,7 @@ public:
bool isAnonymous() const { return Kind == RK_AnonymousDef; }
- ArrayRef<Init *> getTemplateArgs() const {
- return TemplateArgs;
- }
+ ArrayRef<const Init *> getTemplateArgs() const { return TemplateArgs; }
ArrayRef<RecordVal> getValues() const { return Values; }
@@ -1771,7 +1785,7 @@ public:
/// Append the direct superclasses of this record to Classes.
void getDirectSuperClasses(SmallVectorImpl<const Record *> &Classes) const;
- bool isTemplateArg(Init *Name) const {
+ bool isTemplateArg(const Init *Name) const {
return llvm::is_contained(TemplateArgs, Name);
}
@@ -1795,7 +1809,7 @@ public:
static_cast<const Record *>(this)->getValue(Name));
}
- void addTemplateArg(Init *Name) {
+ void addTemplateArg(const Init *Name) {
assert(!isTemplateArg(Name) && "Template arg already defined!");
TemplateArgs.push_back(Name);
}
@@ -1805,7 +1819,7 @@ public:
Values.push_back(RV);
}
- void removeValue(Init *Name) {
+ void removeValue(const Init *Name) {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
if (Values[i].getNameInit() == Name) {
Values.erase(Values.begin()+i);
@@ -1818,11 +1832,11 @@ public:
removeValue(StringInit::get(getRecords(), Name));
}
- void addAssertion(SMLoc Loc, Init *Condition, Init *Message) {
+ void addAssertion(SMLoc Loc, const Init *Condition, const Init *Message) {
Assertions.push_back(AssertionInfo(Loc, Condition, Message));
}
- void addDump(SMLoc Loc, Init *Message) {
+ void addDump(SMLoc Loc, const Init *Message) {
Dumps.push_back(DumpInfo(Loc, Message));
}
@@ -1867,7 +1881,7 @@ public:
///
/// This is a final resolve: any error messages, e.g. due to undefined !cast
/// references, are generated now.
- void resolveReferences(Init *NewName = nullptr);
+ void resolveReferences(const Init *NewName = nullptr);
/// Apply the resolver to the name of the record as well as to the
/// initializers of all fields of the record except SkipVal.
@@ -1891,7 +1905,7 @@ public:
/// Return the initializer for a value with the specified name, or throw an
/// exception if the field does not exist.
- Init *getValueInit(StringRef FieldName) const;
+ const Init *getValueInit(StringRef FieldName) const;
/// Return true if the named field is unset.
bool isValueUnset(StringRef FieldName) const {
@@ -1911,12 +1925,12 @@ public:
/// This method looks up the specified field and returns its value as a
/// BitsInit, throwing an exception if the field does not exist or if the
/// value is not the right type.
- BitsInit *getValueAsBitsInit(StringRef FieldName) const;
+ const BitsInit *getValueAsBitsInit(StringRef FieldName) const;
/// This method looks up the specified field and returns its value as a
/// ListInit, throwing an exception if the field does not exist or if the
/// value is not the right type.
- ListInit *getValueAsListInit(StringRef FieldName) const;
+ const ListInit *getValueAsListInit(StringRef FieldName) const;
/// This method looks up the specified field and returns its value as a
/// vector of records, throwing an exception if the field does not exist or
@@ -1961,14 +1975,14 @@ public:
/// This method looks up the specified field and returns its value as an Dag,
/// throwing an exception if the field does not exist or if the value is not
/// the right type.
- DagInit *getValueAsDag(StringRef FieldName) const;
+ const DagInit *getValueAsDag(StringRef FieldName) const;
};
raw_ostream &operator<<(raw_ostream &OS, const Record &R);
class RecordKeeper {
using RecordMap = std::map<std::string, std::unique_ptr<Record>, std::less<>>;
- using GlobalMap = std::map<std::string, Init *, std::less<>>;
+ using GlobalMap = std::map<std::string, const Init *, std::less<>>;
public:
RecordKeeper();
@@ -2002,7 +2016,7 @@ public:
}
/// Get the \p Init value of the specified global variable.
- Init *getGlobal(StringRef Name) const {
+ const Init *getGlobal(StringRef Name) const {
if (const Record *R = getDef(Name))
return R->getDefInit();
auto It = ExtraGlobals.find(Name);
@@ -2027,14 +2041,14 @@ public:
assert(Ins && "Record already exists");
}
- void addExtraGlobal(StringRef Name, Init *I) {
+ void addExtraGlobal(StringRef Name, const Init *I) {
bool Ins = ExtraGlobals.insert(std::make_pair(std::string(Name), I)).second;
(void)Ins;
assert(!getDef(Name));
assert(Ins && "Global already exists");
}
- Init *getNewAnonymousName();
+ const Init *getNewAnonymousName();
TGTimer &getTimer() const { return *Timer; }
@@ -2190,18 +2204,18 @@ raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
/// Interface for looking up the initializer for a variable name, used by
/// Init::resolveReferences.
class Resolver {
- Record *CurRec;
+ const Record *CurRec;
bool IsFinal = false;
public:
- explicit Resolver(Record *CurRec) : CurRec(CurRec) {}
+ explicit Resolver(const Record *CurRec) : CurRec(CurRec) {}
virtual ~Resolver() = default;
- Record *getCurrentRecord() const { return CurRec; }
+ const Record *getCurrentRecord() const { return CurRec; }
/// Return the initializer for the given variable name (should normally be a
/// StringInit), or nullptr if the name could not be resolved.
- virtual Init *resolve(Init *VarName) = 0;
+ virtual const Init *resolve(const Init *VarName) = 0;
// Whether bits in a BitsInit should stay unresolved if resolving them would
// result in a ? (UnsetInit). This behavior is used to represent instruction
@@ -2219,19 +2233,19 @@ public:
/// Resolve arbitrary mappings.
class MapResolver final : public Resolver {
struct MappedValue {
- Init *V;
+ const Init *V;
bool Resolved;
MappedValue() : V(nullptr), Resolved(false) {}
- MappedValue(Init *V, bool Resolved) : V(V), Resolved(Resolved) {}
+ MappedValue(const Init *V, bool Resolved) : V(V), Resolved(Resolved) {}
};
- DenseMap<Init *, MappedValue> Map;
+ DenseMap<const Init *, MappedValue> Map;
public:
- explicit MapResolver(Record *CurRec = nullptr) : Resolver(CurRec) {}
+ explicit MapResolver(const Record *CurRec = nullptr) : Resolver(CurRec) {}
- void set(Init *Key, Init *Value) { Map[Key] = {Value, false}; }
+ void set(const Init *Key, const Init *Value) { Map[Key] = {Value, false}; }
bool isComplete(Init *VarName) const {
auto It = Map.find(VarName);
@@ -2239,21 +2253,21 @@ public:
return It->second.V->isComplete();
}
- Init *resolve(Init *VarName) override;
+ const Init *resolve(const Init *VarName) override;
};
/// Resolve all variables from a record except for unset variables.
class RecordResolver final : public Resolver {
- DenseMap<Init *, Init *> Cache;
- SmallVector<Init *, 4> Stack;
- Init *Name = nullptr;
+ DenseMap<const Init *, const Init *> Cache;
+ SmallVector<const Init *, 4> Stack;
+ const Init *Name = nullptr;
public:
- explicit RecordResolver(Record &R) : Resolver(&R) {}
+ explicit RecordResolver(const Record &R) : Resolver(&R) {}
- void setName(Init *NewName) { Name = NewName; }
+ void setName(const Init *NewName) { Name = NewName; }
- Init *resolve(Init *VarName) override;
+ const Init *resolve(const Init *VarName) override;
bool keepUnsetBits() const override { return true; }
};
@@ -2261,7 +2275,7 @@ public:
/// Delegate resolving to a sub-resolver, but shadow some variable names.
class ShadowResolver final : public Resolver {
Resolver &R;
- DenseSet<Init *> Shadowed;
+ DenseSet<const Init *> Shadowed;
public:
explicit ShadowResolver(Resolver &R)
@@ -2269,9 +2283,9 @@ public:
setFinal(R.isFinal());
}
- void addShadow(Init *Key) { Shadowed.insert(Key); }
+ void addShadow(const Init *Key) { Shadowed.insert(Key); }
- Init *resolve(Init *VarName) override {
+ const Init *resolve(const Init *VarName) override {
if (Shadowed.count(VarName))
return nullptr;
return R.resolve(VarName);
@@ -2290,22 +2304,22 @@ public:
bool foundUnresolved() const { return FoundUnresolved; }
- Init *resolve(Init *VarName) override;
+ const Init *resolve(const Init *VarName) override;
};
/// Do not resolve anything, but keep track of whether a given variable was
/// referenced.
class HasReferenceResolver final : public Resolver {
- Init *VarNameToTrack;
+ const Init *VarNameToTrack;
bool Found = false;
public:
- explicit HasReferenceResolver(Init *VarNameToTrack)
+ explicit HasReferenceResolver(const Init *VarNameToTrack)
: Resolver(nullptr), VarNameToTrack(VarNameToTrack) {}
bool found() const { return Found; }
- Init *resolve(Init *VarName) override;
+ const Init *resolve(const Init *VarName) override;
};
void EmitDetailedRecords(const RecordKeeper &RK, raw_ostream &OS);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 77cb437..d0373a7 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1791,20 +1791,24 @@ class integer_of_opcode<Instruction castOpcode> : GICombineRule <
def integer_of_truncate : integer_of_opcode<G_TRUNC>;
-def cast_combines: GICombineGroup<[
+def cast_of_cast_combines: GICombineGroup<[
truncate_of_zext,
truncate_of_sext,
truncate_of_anyext,
- select_of_zext,
- select_of_anyext,
- select_of_truncate,
zext_of_zext,
zext_of_anyext,
sext_of_sext,
sext_of_anyext,
anyext_of_anyext,
anyext_of_zext,
- anyext_of_sext,
+ anyext_of_sext
+]>;
+
+def cast_combines: GICombineGroup<[
+ cast_of_cast_combines,
+ select_of_zext,
+ select_of_anyext,
+ select_of_truncate,
buildvector_of_truncate,
narrow_binop_add,
narrow_binop_sub,
diff --git a/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
index 29d96a0..7364976 100644
--- a/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -118,7 +118,7 @@ public:
/// Rewrite debug value intrinsics to conform to a new SSA form.
///
- /// This will scout out all the debug value instrinsics associated with
+ /// This will scout out all the debug value intrinsics associated with
/// the instruction. Anything outside of its block will have its
/// value set to the new SSA value if available, and undef if not.
void UpdateDebugValues(Instruction *I);
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index da0fd1f..74df67a 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -82,7 +82,7 @@ static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
else
Element = C->getAggregateElement(i);
- if (Element && isa<UndefValue>(Element)) {
+ if (isa_and_nonnull<UndefValue>(Element)) {
Result <<= BitShift;
continue;
}
@@ -219,7 +219,7 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src = C->getAggregateElement(SrcElt++);
- if (Src && isa<UndefValue>(Src))
+ if (isa_and_nonnull<UndefValue>(Src))
Src = Constant::getNullValue(
cast<VectorType>(C->getType())->getElementType());
else
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 10ad470..42b04046 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1188,6 +1188,20 @@ std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
return ValueLatticeElement::getRange(*CR);
}
+ // a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b
+ Value *X, *Y;
+ if (ICI->isEquality() && match(Val, m_Sub(m_Value(X), m_Value(Y)))) {
+ // Peek through ptrtoints
+ match(X, m_PtrToIntSameSize(DL, m_Value(X)));
+ match(Y, m_PtrToIntSameSize(DL, m_Value(Y)));
+ if ((X == LHS && Y == RHS) || (X == RHS && Y == LHS)) {
+ Constant *NullVal = Constant::getNullValue(Val->getType());
+ if (EdgePred == ICmpInst::ICMP_EQ)
+ return ValueLatticeElement::get(NullVal);
+ return ValueLatticeElement::getNot(NullVal);
+ }
+ }
+
return ValueLatticeElement::getOverdefined();
}
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index 1bd9ee6..004e8b7 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -82,7 +82,7 @@ static cl::opt<std::string> ModuleSummaryDotFile(
cl::desc("File to emit dot graph of new summary into"));
static cl::opt<bool> EnableMemProfIndirectCallSupport(
- "enable-memprof-indirect-call-support", cl::init(true), cl::Hidden,
+ "enable-memprof-indirect-call-support", cl::init(false), cl::Hidden,
cl::desc(
"Enable MemProf support for summarizing and cloning indirect calls"));
@@ -503,6 +503,10 @@ static void computeFunctionSummary(
if (!IsThinLTO)
continue;
+ // Skip indirect calls if we haven't enabled memprof ICP.
+ if (!CalledFunction && !EnableMemProfIndirectCallSupport)
+ continue;
+
// Ensure we keep this analysis in sync with the handling in the ThinLTO
// backend (see MemProfContextDisambiguation::applyImport). Save this call
// so that we can skip it in checking the reverse case later.
@@ -561,7 +565,8 @@ static void computeFunctionSummary(
auto CalleeValueInfo =
Index.getOrInsertValueInfo(cast<GlobalValue>(CalledValue));
Callsites.push_back({CalleeValueInfo, StackIdIndices});
- } else if (EnableMemProfIndirectCallSupport) {
+ } else {
+ assert(EnableMemProfIndirectCallSupport);
// For indirect callsites, create multiple Callsites, one per target.
// This enables having a different set of clone versions per target,
// and we will apply the cloning decisions while speculatively
@@ -1223,6 +1228,9 @@ bool llvm::mayHaveMemprofSummary(const CallBase *CB) {
if (CI && CalledFunction->isIntrinsic())
return false;
} else {
+ // Skip indirect calls if we haven't enabled memprof ICP.
+ if (!EnableMemProfIndirectCallSupport)
+ return false;
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
return false;
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 1785d77..d9651d2 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -372,6 +372,8 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
TLI.setUnavailable(LibFunc_log2);
TLI.setUnavailable(LibFunc_log2f);
TLI.setAvailableWithName(LibFunc_logb, "_logb");
+ TLI.setUnavailable(LibFunc_ilogb);
+ TLI.setUnavailable(LibFunc_ilogbf);
if (hasPartialFloat)
TLI.setAvailableWithName(LibFunc_logbf, "_logbf");
else
@@ -398,6 +400,7 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
TLI.setUnavailable(LibFunc_log1pl);
TLI.setUnavailable(LibFunc_log2l);
TLI.setUnavailable(LibFunc_logbl);
+ TLI.setUnavailable(LibFunc_ilogbl);
TLI.setUnavailable(LibFunc_nearbyintl);
TLI.setUnavailable(LibFunc_rintl);
TLI.setUnavailable(LibFunc_roundl);
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 3a8cde7..327e7f7 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2849,7 +2849,7 @@ void AsmPrinter::emitJumpTableSizesSection(const MachineJumpTableInfo *MJTI,
if (isElf) {
MCSymbolELF *LinkedToSym = dyn_cast<MCSymbolELF>(CurrentFnSym);
- int Flags = F.hasComdat() ? ELF::SHF_GROUP : 0;
+ int Flags = F.hasComdat() ? (unsigned)ELF::SHF_GROUP : 0;
JumpTableSizesSection = OutContext.getELFSection(
sectionName, ELF::SHT_LLVM_JT_SIZES, Flags, 0, GroupName, F.hasComdat(),
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index d046467..563a826 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2378,7 +2378,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
case Intrinsic::stackprotector: {
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
Register GuardVal;
- if (TLI->useLoadStackGuardNode()) {
+ if (TLI->useLoadStackGuardNode(*CI.getModule())) {
GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
} else
@@ -3869,7 +3869,7 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
// If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
// Otherwise, emit a volatile load to retrieve the stack guard value.
- if (TLI->useLoadStackGuardNode()) {
+ if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
Guard =
MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
getStackGuard(Guard, *CurBuilder);
diff --git a/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index 4cbb6ad..c7a673b 100644
--- a/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -286,7 +286,7 @@ public:
bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
- unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
+ Register getIncomingValue() { return PHI->getOperand(idx).getReg(); }
MachineBasicBlock *getIncomingBlock() {
return PHI->getOperand(idx+1).getMBB();
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 18439b8..98eed6b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1205,13 +1205,13 @@ SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
- if (DAG.isConstantIntBuildVectorOrConstantInt(peekThroughBitcasts(N01))) {
+ if (DAG.isConstantIntBuildVectorOrConstantInt(N01)) {
SDNodeFlags NewFlags;
if (N0.getOpcode() == ISD::ADD && N0->getFlags().hasNoUnsignedWrap() &&
Flags.hasNoUnsignedWrap())
NewFlags.setNoUnsignedWrap(true);
- if (DAG.isConstantIntBuildVectorOrConstantInt(peekThroughBitcasts(N1))) {
+ if (DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
// Reassociate: (op (op x, c1), c2) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, {N01, N1}))
return DAG.getNode(Opc, DL, VT, N00, OpNode, NewFlags);
@@ -9931,10 +9931,10 @@ SDValue DAGCombiner::visitRotate(SDNode *N) {
// fold (rot* (rot* x, c2), c1)
// -> (rot* x, ((c1 % bitsize) +- (c2 % bitsize) + bitsize) % bitsize)
if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
- SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
- SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
- if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
- EVT ShiftVT = C1->getValueType(0);
+ bool C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
+ bool C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
+ if (C1 && C2 && N1.getValueType() == N0.getOperand(1).getValueType()) {
+ EVT ShiftVT = N1.getValueType();
bool SameSide = (N->getOpcode() == NextOp);
unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
@@ -16805,8 +16805,8 @@ SDValue DAGCombiner::visitVP_FADD(SDNode *N) {
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- SDNode *N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
- SDNode *N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
+ bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
+ bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
@@ -16903,10 +16903,8 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
// of rounding steps.
if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
if (N0.getOpcode() == ISD::FMUL) {
- SDNode *CFP00 =
- DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
- SDNode *CFP01 =
- DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
+ bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
+ bool CFP01 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
// (fadd (fmul x, c), x) -> (fmul x, c+1)
if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
@@ -16926,10 +16924,8 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
if (N1.getOpcode() == ISD::FMUL) {
- SDNode *CFP10 =
- DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
- SDNode *CFP11 =
- DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
+ bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
+ bool CFP11 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
// (fadd x, (fmul x, c)) -> (fmul x, c+1)
if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
@@ -16949,8 +16945,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
if (N0.getOpcode() == ISD::FADD) {
- SDNode *CFP00 =
- DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
+ bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
// (fadd (fadd x, x), x) -> (fmul x, 3.0)
if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
(N0.getOperand(0) == N1)) {
@@ -16960,8 +16955,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
if (N1.getOpcode() == ISD::FADD) {
- SDNode *CFP10 =
- DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
+ bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
// (fadd x, (fadd x, x)) -> (fmul x, 3.0)
if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
N1.getOperand(0) == N0) {
@@ -17374,11 +17368,9 @@ template <class MatchContextClass> SDValue DAGCombiner::visitFMA(SDNode *N) {
MatchContextClass matcher(DAG, TLI, N);
// Constant fold FMA.
- if (isa<ConstantFPSDNode>(N0) &&
- isa<ConstantFPSDNode>(N1) &&
- isa<ConstantFPSDNode>(N2)) {
- return matcher.getNode(ISD::FMA, DL, VT, N0, N1, N2);
- }
+ if (SDValue C =
+ DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, {N0, N1, N2}))
+ return C;
// (-N0 * -N1) + N2 --> (N0 * N1) + N2
TargetLowering::NegatibleCost CostN0 =
@@ -17406,14 +17398,14 @@ template <class MatchContextClass> SDValue DAGCombiner::visitFMA(SDNode *N) {
// FIXME: Support splat of constant.
if (N0CFP && N0CFP->isExactlyValue(1.0))
- return matcher.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
+ return matcher.getNode(ISD::FADD, DL, VT, N1, N2);
if (N1CFP && N1CFP->isExactlyValue(1.0))
- return matcher.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
+ return matcher.getNode(ISD::FADD, DL, VT, N0, N2);
// Canonicalize (fma c, x, y) -> (fma x, c, y)
if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
!DAG.isConstantFPBuildVectorOrConstantFP(N1))
- return matcher.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
+ return matcher.getNode(ISD::FMA, DL, VT, N1, N0, N2);
bool CanReassociate =
Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
@@ -17494,9 +17486,8 @@ SDValue DAGCombiner::visitFMAD(SDNode *N) {
SDLoc DL(N);
// Constant fold FMAD.
- if (isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1) &&
- isa<ConstantFPSDNode>(N2))
- return DAG.getNode(ISD::FMAD, DL, VT, N0, N1, N2);
+ if (SDValue C = DAG.FoldConstantArithmetic(ISD::FMAD, DL, VT, {N0, N1, N2}))
+ return C;
return SDValue();
}
@@ -17722,7 +17713,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize, CostN1);
if (NegN1 && (CostN0 == TargetLowering::NegatibleCost::Cheaper ||
CostN1 == TargetLowering::NegatibleCost::Cheaper))
- return DAG.getNode(ISD::FDIV, SDLoc(N), VT, NegN0, NegN1);
+ return DAG.getNode(ISD::FDIV, DL, VT, NegN0, NegN1);
}
if (SDValue R = combineFMulOrFDivWithIntPow2(N))
@@ -18156,8 +18147,9 @@ SDValue DAGCombiner::visitXROUND(SDNode *N) {
// fold (lrint|llrint c1fp) -> c1
// fold (lround|llround c1fp) -> c1
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
- return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0);
+ if (SDValue C =
+ DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, {N0}))
+ return C;
return SDValue();
}
@@ -18166,10 +18158,10 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
+ SDLoc DL(N);
// fold (fp_round c1fp) -> c1fp
- if (SDValue C =
- DAG.FoldConstantArithmetic(ISD::FP_ROUND, SDLoc(N), VT, {N0, N1}))
+ if (SDValue C = DAG.FoldConstantArithmetic(ISD::FP_ROUND, DL, VT, {N0, N1}))
return C;
// fold (fp_round (fp_extend x)) -> x
@@ -18200,12 +18192,10 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
// single-step fp_round we want to fold to.
// In other words, double rounding isn't the same as rounding.
// Also, this is a value preserving truncation iff both fp_round's are.
- if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
- SDLoc DL(N);
+ if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc)
return DAG.getNode(
ISD::FP_ROUND, DL, VT, N0.getOperand(0),
DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL, /*isTarget=*/true));
- }
}
// fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
@@ -18219,8 +18209,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
N0.getOperand(0), N1);
AddToWorklist(Tmp.getNode());
- return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
- Tmp, N0.getOperand(1));
+ return DAG.getNode(ISD::FCOPYSIGN, DL, VT, Tmp, N0.getOperand(1));
}
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
@@ -18232,42 +18221,40 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ SDLoc DL(N);
if (VT.isVector())
- if (SDValue FoldedVOp = SimplifyVCastOp(N, SDLoc(N)))
+ if (SDValue FoldedVOp = SimplifyVCastOp(N, DL))
return FoldedVOp;
// If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
- if (N->hasOneUse() &&
- N->use_begin()->getOpcode() == ISD::FP_ROUND)
+ if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
return SDValue();
// fold (fp_extend c1fp) -> c1fp
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
- return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
+ if (SDValue C = DAG.FoldConstantArithmetic(ISD::FP_EXTEND, DL, VT, {N0}))
+ return C;
// fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
if (N0.getOpcode() == ISD::FP16_TO_FP &&
TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
- return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
+ return DAG.getNode(ISD::FP16_TO_FP, DL, VT, N0.getOperand(0));
// Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
// value of X.
- if (N0.getOpcode() == ISD::FP_ROUND
- && N0.getConstantOperandVal(1) == 1) {
+ if (N0.getOpcode() == ISD::FP_ROUND && N0.getConstantOperandVal(1) == 1) {
SDValue In = N0.getOperand(0);
if (In.getValueType() == VT) return In;
if (VT.bitsLT(In.getValueType()))
- return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
- In, N0.getOperand(1));
- return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
+ return DAG.getNode(ISD::FP_ROUND, DL, VT, In, N0.getOperand(1));
+ return DAG.getNode(ISD::FP_EXTEND, DL, VT, In);
}
// fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, VT, N0.getValueType())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT,
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
@@ -18433,10 +18420,11 @@ SDValue DAGCombiner::visitFMinMax(SDNode *N) {
SDValue DAGCombiner::visitFABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ SDLoc DL(N);
// fold (fabs c1) -> fabs(c1)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
- return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
+ if (SDValue C = DAG.FoldConstantArithmetic(ISD::FABS, DL, VT, {N0}))
+ return C;
// fold (fabs (fabs x)) -> (fabs x)
if (N0.getOpcode() == ISD::FABS)
@@ -18445,7 +18433,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// fold (fabs (fneg x)) -> (fabs x)
// fold (fabs (fcopysign x, y)) -> (fabs x)
if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
- return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
+ return DAG.getNode(ISD::FABS, DL, VT, N0.getOperand(0));
if (SDValue Cast = foldSignChangeInBitcast(N))
return Cast;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 66c078b..43d4967 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6962,10 +6962,10 @@ void SelectionDAG::canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1,
// Canonicalize:
// binop(const, nonconst) -> binop(nonconst, const)
- SDNode *N1C = isConstantIntBuildVectorOrConstantInt(N1);
- SDNode *N2C = isConstantIntBuildVectorOrConstantInt(N2);
- SDNode *N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
- SDNode *N2CFP = isConstantFPBuildVectorOrConstantFP(N2);
+ bool N1C = isConstantIntBuildVectorOrConstantInt(N1);
+ bool N2C = isConstantIntBuildVectorOrConstantInt(N2);
+ bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
+ bool N2CFP = isConstantFPBuildVectorOrConstantFP(N2);
if ((N1C && !N2C) || (N1CFP && !N2CFP))
std::swap(N1, N2);
@@ -13197,39 +13197,44 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
return true;
}
-// Returns the SDNode if it is a constant integer BuildVector
-// or constant integer.
-SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
- if (isa<ConstantSDNode>(N))
- return N.getNode();
+// Returns true if it is a constant integer BuildVector or constant integer,
+// possibly hidden by a bitcast.
+bool SelectionDAG::isConstantIntBuildVectorOrConstantInt(
+ SDValue N, bool AllowOpaques) const {
+ N = peekThroughBitcasts(N);
+
+ if (auto *C = dyn_cast<ConstantSDNode>(N))
+ return AllowOpaques || !C->isOpaque();
+
if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
- return N.getNode();
+ return true;
+
// Treat a GlobalAddress supporting constant offset folding as a
// constant integer.
- if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
+ if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
if (GA->getOpcode() == ISD::GlobalAddress &&
TLI->isOffsetFoldingLegal(GA))
- return GA;
+ return true;
+
if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
isa<ConstantSDNode>(N.getOperand(0)))
- return N.getNode();
- return nullptr;
+ return true;
+ return false;
}
-// Returns the SDNode if it is a constant float BuildVector
-// or constant float.
-SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
+// Returns true if it is a constant float BuildVector or constant float.
+bool SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
if (isa<ConstantFPSDNode>(N))
- return N.getNode();
+ return true;
if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
- return N.getNode();
+ return true;
if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
isa<ConstantFPSDNode>(N.getOperand(0)))
- return N.getNode();
+ return true;
- return nullptr;
+ return false;
}
std::optional<bool> SelectionDAG::isBoolConstant(SDValue N,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3e13364..8450553 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3138,7 +3138,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
// If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
// Otherwise, emit a volatile load to retrieve the stack guard value.
SDValue Chain = DAG.getEntryNode();
- if (TLI.useLoadStackGuardNode()) {
+ if (TLI.useLoadStackGuardNode(M)) {
Guard = getLoadStackGuard(DAG, dl, Chain);
} else {
const Value *IRGuard = TLI.getSDagStackGuard(M);
@@ -7349,7 +7349,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
const Module &M = *MF.getFunction().getParent();
EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
SDValue Chain = getRoot();
- if (TLI.useLoadStackGuardNode()) {
+ if (TLI.useLoadStackGuardNode(M)) {
Res = getLoadStackGuard(DAG, sdl, Chain);
Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
} else {
@@ -7369,9 +7369,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// Emit code into the DAG to store the stack guard onto the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
+ const Module &M = *MF.getFunction().getParent();
SDValue Src, Chain = getRoot();
- if (TLI.useLoadStackGuardNode())
+ if (TLI.useLoadStackGuardNode(M))
Src = getLoadStackGuard(DAG, sdl, Chain);
else
Src = getValue(I.getArgOperand(0)); // The guard's value.
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index ebad350..60ab33b 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -80,6 +80,7 @@
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/CallBrPrepare.h"
#include "llvm/CodeGen/CodeGenPrepare.h"
+#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/DeadMachineInstructionElim.h"
#include "llvm/CodeGen/DwarfEHPrepare.h"
#include "llvm/CodeGen/EarlyIfConversion.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 90859c1..549c135 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -339,6 +339,7 @@ FUNCTION_PASS("callbr-prepare", CallBrPreparePass())
FUNCTION_PASS("callsite-splitting", CallSiteSplittingPass())
FUNCTION_PASS("chr", ControlHeightReductionPass())
FUNCTION_PASS("codegenprepare", CodeGenPreparePass(TM))
+FUNCTION_PASS("complex-deinterleaving", ComplexDeinterleavingPass(TM))
FUNCTION_PASS("consthoist", ConstantHoistingPass())
FUNCTION_PASS("constraint-elimination", ConstraintEliminationPass())
FUNCTION_PASS("coro-elide", CoroElidePass())
diff --git a/llvm/lib/TableGen/DetailedRecordsBackend.cpp b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
index 61fd363..4a337248 100644
--- a/llvm/lib/TableGen/DetailedRecordsBackend.cpp
+++ b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
@@ -131,7 +131,7 @@ void DetailedRecordsEmitter::printDefms(const Record &Rec, raw_ostream &OS) {
// Print the template arguments of a class.
void DetailedRecordsEmitter::printTemplateArgs(const Record &Rec,
raw_ostream &OS) {
- ArrayRef<Init *> Args = Rec.getTemplateArgs();
+ ArrayRef<const Init *> Args = Rec.getTemplateArgs();
if (Args.empty()) {
OS << " Template args: (none)\n";
return;
diff --git a/llvm/lib/TableGen/Error.cpp b/llvm/lib/TableGen/Error.cpp
index 6d1d581..9142366 100644
--- a/llvm/lib/TableGen/Error.cpp
+++ b/llvm/lib/TableGen/Error.cpp
@@ -160,7 +160,7 @@ void PrintFatalError(const RecordVal *RecVal, const Twine &Msg) {
// Check an assertion: Obtain the condition value and be sure it is true.
// If not, print a nonfatal error along with the message.
-bool CheckAssert(SMLoc Loc, Init *Condition, Init *Message) {
+bool CheckAssert(SMLoc Loc, const Init *Condition, const Init *Message) {
auto *CondValue = dyn_cast_or_null<IntInit>(Condition->convertInitializerTo(
IntRecTy::get(Condition->getRecordKeeper())));
if (!CondValue) {
@@ -178,7 +178,7 @@ bool CheckAssert(SMLoc Loc, Init *Condition, Init *Message) {
}
// Dump a message to stderr.
-void dumpMessage(SMLoc Loc, Init *Message) {
+void dumpMessage(SMLoc Loc, const Init *Message) {
if (auto *MessageInit = dyn_cast<StringInit>(Message))
PrintNote(Loc, MessageInit->getValue());
else
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index 447ecb7..f8ea883 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -74,8 +74,8 @@ struct RecordKeeperImpl {
FoldingSet<ArgumentInit> TheArgumentInitPool;
FoldingSet<BitsInit> TheBitsInitPool;
std::map<int64_t, IntInit *> TheIntInitPool;
- StringMap<StringInit *, BumpPtrAllocator &> StringInitStringPool;
- StringMap<StringInit *, BumpPtrAllocator &> StringInitCodePool;
+ StringMap<const StringInit *, BumpPtrAllocator &> StringInitStringPool;
+ StringMap<const StringInit *, BumpPtrAllocator &> StringInitCodePool;
FoldingSet<ListInit> TheListInitPool;
FoldingSet<UnOpInit> TheUnOpInitPool;
FoldingSet<BinOpInit> TheBinOpInitPool;
@@ -83,10 +83,12 @@ struct RecordKeeperImpl {
FoldingSet<FoldOpInit> TheFoldOpInitPool;
FoldingSet<IsAOpInit> TheIsAOpInitPool;
FoldingSet<ExistsOpInit> TheExistsOpInitPool;
- DenseMap<std::pair<const RecTy *, Init *>, VarInit *> TheVarInitPool;
- DenseMap<std::pair<TypedInit *, unsigned>, VarBitInit *> TheVarBitInitPool;
+ DenseMap<std::pair<const RecTy *, const Init *>, VarInit *> TheVarInitPool;
+ DenseMap<std::pair<const TypedInit *, unsigned>, VarBitInit *>
+ TheVarBitInitPool;
FoldingSet<VarDefInit> TheVarDefInitPool;
- DenseMap<std::pair<Init *, StringInit *>, FieldInit *> TheFieldInitPool;
+ DenseMap<std::pair<const Init *, const StringInit *>, FieldInit *>
+ TheFieldInitPool;
FoldingSet<CondOpInit> TheCondOpInitPool;
FoldingSet<DagInit> TheDagInitPool;
FoldingSet<RecordRecTy> RecordTypePool;
@@ -389,15 +391,13 @@ UnsetInit *UnsetInit::get(RecordKeeper &RK) {
return &RK.getImpl().TheUnsetInit;
}
-Init *UnsetInit::getCastTo(const RecTy *Ty) const {
- return const_cast<UnsetInit *>(this);
-}
+const Init *UnsetInit::getCastTo(const RecTy *Ty) const { return this; }
-Init *UnsetInit::convertInitializerTo(const RecTy *Ty) const {
- return const_cast<UnsetInit *>(this);
+const Init *UnsetInit::convertInitializerTo(const RecTy *Ty) const {
+ return this;
}
-static void ProfileArgumentInit(FoldingSetNodeID &ID, Init *Value,
+static void ProfileArgumentInit(FoldingSetNodeID &ID, const Init *Value,
ArgAuxType Aux) {
auto I = Aux.index();
ID.AddInteger(I);
@@ -412,14 +412,15 @@ void ArgumentInit::Profile(FoldingSetNodeID &ID) const {
ProfileArgumentInit(ID, Value, Aux);
}
-ArgumentInit *ArgumentInit::get(Init *Value, ArgAuxType Aux) {
+const ArgumentInit *ArgumentInit::get(const Init *Value, ArgAuxType Aux) {
FoldingSetNodeID ID;
ProfileArgumentInit(ID, Value, Aux);
RecordKeeper &RK = Value->getRecordKeeper();
detail::RecordKeeperImpl &RKImpl = RK.getImpl();
void *IP = nullptr;
- if (ArgumentInit *I = RKImpl.TheArgumentInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const ArgumentInit *I =
+ RKImpl.TheArgumentInitPool.FindNodeOrInsertPos(ID, IP))
return I;
ArgumentInit *I = new (RKImpl.Allocator) ArgumentInit(Value, Aux);
@@ -427,8 +428,8 @@ ArgumentInit *ArgumentInit::get(Init *Value, ArgAuxType Aux) {
return I;
}
-Init *ArgumentInit::resolveReferences(Resolver &R) const {
- Init *NewValue = Value->resolveReferences(R);
+const Init *ArgumentInit::resolveReferences(Resolver &R) const {
+ const Init *NewValue = Value->resolveReferences(R);
if (NewValue != Value)
return cloneWithValue(NewValue);
@@ -439,7 +440,7 @@ BitInit *BitInit::get(RecordKeeper &RK, bool V) {
return V ? &RK.getImpl().TrueBitInit : &RK.getImpl().FalseBitInit;
}
-Init *BitInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *BitInit::convertInitializerTo(const RecTy *Ty) const {
if (isa<BitRecTy>(Ty))
return const_cast<BitInit *>(this);
@@ -455,15 +456,15 @@ Init *BitInit::convertInitializerTo(const RecTy *Ty) const {
return nullptr;
}
-static void
-ProfileBitsInit(FoldingSetNodeID &ID, ArrayRef<Init *> Range) {
+static void ProfileBitsInit(FoldingSetNodeID &ID,
+ ArrayRef<const Init *> Range) {
ID.AddInteger(Range.size());
- for (Init *I : Range)
+ for (const Init *I : Range)
ID.AddPointer(I);
}
-BitsInit *BitsInit::get(RecordKeeper &RK, ArrayRef<Init *> Range) {
+BitsInit *BitsInit::get(RecordKeeper &RK, ArrayRef<const Init *> Range) {
FoldingSetNodeID ID;
ProfileBitsInit(ID, Range);
@@ -472,20 +473,20 @@ BitsInit *BitsInit::get(RecordKeeper &RK, ArrayRef<Init *> Range) {
if (BitsInit *I = RKImpl.TheBitsInitPool.FindNodeOrInsertPos(ID, IP))
return I;
- void *Mem = RKImpl.Allocator.Allocate(totalSizeToAlloc<Init *>(Range.size()),
- alignof(BitsInit));
+ void *Mem = RKImpl.Allocator.Allocate(
+ totalSizeToAlloc<const Init *>(Range.size()), alignof(BitsInit));
BitsInit *I = new (Mem) BitsInit(RK, Range.size());
std::uninitialized_copy(Range.begin(), Range.end(),
- I->getTrailingObjects<Init *>());
+ I->getTrailingObjects<const Init *>());
RKImpl.TheBitsInitPool.InsertNode(I, IP);
return I;
}
void BitsInit::Profile(FoldingSetNodeID &ID) const {
- ProfileBitsInit(ID, ArrayRef(getTrailingObjects<Init *>(), NumBits));
+ ProfileBitsInit(ID, ArrayRef(getTrailingObjects<const Init *>(), NumBits));
}
-Init *BitsInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *BitsInit::convertInitializerTo(const RecTy *Ty) const {
if (isa<BitRecTy>(Ty)) {
if (getNumBits() != 1) return nullptr; // Only accept if just one bit!
return getBit(0);
@@ -517,9 +518,9 @@ std::optional<int64_t> BitsInit::convertInitializerToInt() const {
return Result;
}
-Init *
+const Init *
BitsInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
- SmallVector<Init *, 16> NewBits(Bits.size());
+ SmallVector<const Init *, 16> NewBits(Bits.size());
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Bits[i] >= getNumBits())
@@ -541,7 +542,7 @@ std::string BitsInit::getAsString() const {
std::string Result = "{ ";
for (unsigned i = 0, e = getNumBits(); i != e; ++i) {
if (i) Result += ", ";
- if (Init *Bit = getBit(e-i-1))
+ if (const Init *Bit = getBit(e - i - 1))
Result += Bit->getAsString();
else
Result += "*";
@@ -551,18 +552,18 @@ std::string BitsInit::getAsString() const {
// resolveReferences - If there are any field references that refer to fields
// that have been filled in, we can propagate the values now.
-Init *BitsInit::resolveReferences(Resolver &R) const {
+const Init *BitsInit::resolveReferences(Resolver &R) const {
bool Changed = false;
- SmallVector<Init *, 16> NewBits(getNumBits());
+ SmallVector<const Init *, 16> NewBits(getNumBits());
- Init *CachedBitVarRef = nullptr;
- Init *CachedBitVarResolved = nullptr;
+ const Init *CachedBitVarRef = nullptr;
+ const Init *CachedBitVarResolved = nullptr;
for (unsigned i = 0, e = getNumBits(); i != e; ++i) {
- Init *CurBit = getBit(i);
- Init *NewBit = CurBit;
+ const Init *CurBit = getBit(i);
+ const Init *NewBit = CurBit;
- if (VarBitInit *CurBitVar = dyn_cast<VarBitInit>(CurBit)) {
+ if (const VarBitInit *CurBitVar = dyn_cast<VarBitInit>(CurBit)) {
if (CurBitVar->getBitVar() != CachedBitVarRef) {
CachedBitVarRef = CurBitVar->getBitVar();
CachedBitVarResolved = CachedBitVarRef->resolveReferences(R);
@@ -583,7 +584,7 @@ Init *BitsInit::resolveReferences(Resolver &R) const {
if (Changed)
return BitsInit::get(getRecordKeeper(), NewBits);
- return const_cast<BitsInit *>(this);
+ return this;
}
IntInit *IntInit::get(RecordKeeper &RK, int64_t V) {
@@ -603,7 +604,7 @@ static bool canFitInBitfield(int64_t Value, unsigned NumBits) {
(Value >> NumBits == 0) || (Value >> (NumBits-1) == -1);
}
-Init *IntInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *IntInit::convertInitializerTo(const RecTy *Ty) const {
if (isa<IntRecTy>(Ty))
return const_cast<IntInit *>(this);
@@ -619,7 +620,7 @@ Init *IntInit::convertInitializerTo(const RecTy *Ty) const {
if (!canFitInBitfield(Value, BRT->getNumBits()))
return nullptr;
- SmallVector<Init *, 16> NewBits(BRT->getNumBits());
+ SmallVector<const Init *, 16> NewBits(BRT->getNumBits());
for (unsigned i = 0; i != BRT->getNumBits(); ++i)
NewBits[i] =
BitInit::get(getRecordKeeper(), Value & ((i < 64) ? (1LL << i) : 0));
@@ -630,9 +631,8 @@ Init *IntInit::convertInitializerTo(const RecTy *Ty) const {
return nullptr;
}
-Init *
-IntInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
- SmallVector<Init *, 16> NewBits(Bits.size());
+const Init *IntInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
+ SmallVector<const Init *, 16> NewBits(Bits.size());
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Bits[i] >= 64)
@@ -648,7 +648,7 @@ AnonymousNameInit *AnonymousNameInit::get(RecordKeeper &RK, unsigned V) {
return new (RK.getImpl().Allocator) AnonymousNameInit(RK, V);
}
-StringInit *AnonymousNameInit::getNameInit() const {
+const StringInit *AnonymousNameInit::getNameInit() const {
return StringInit::get(getRecordKeeper(), getAsString());
}
@@ -656,7 +656,7 @@ std::string AnonymousNameInit::getAsString() const {
return "anonymous_" + utostr(Value);
}
-Init *AnonymousNameInit::resolveReferences(Resolver &R) const {
+const Init *AnonymousNameInit::resolveReferences(Resolver &R) const {
auto *Old = const_cast<Init *>(static_cast<const Init *>(this));
auto *New = R.resolve(Old);
New = New ? New : Old;
@@ -666,7 +666,8 @@ Init *AnonymousNameInit::resolveReferences(Resolver &R) const {
return New;
}
-StringInit *StringInit::get(RecordKeeper &RK, StringRef V, StringFormat Fmt) {
+const StringInit *StringInit::get(RecordKeeper &RK, StringRef V,
+ StringFormat Fmt) {
detail::RecordKeeperImpl &RKImpl = RK.getImpl();
auto &InitMap = Fmt == SF_String ? RKImpl.StringInitStringPool
: RKImpl.StringInitCodePool;
@@ -676,39 +677,40 @@ StringInit *StringInit::get(RecordKeeper &RK, StringRef V, StringFormat Fmt) {
return Entry.second;
}
-Init *StringInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *StringInit::convertInitializerTo(const RecTy *Ty) const {
if (isa<StringRecTy>(Ty))
return const_cast<StringInit *>(this);
return nullptr;
}
-static void ProfileListInit(FoldingSetNodeID &ID, ArrayRef<Init *> Range,
+static void ProfileListInit(FoldingSetNodeID &ID, ArrayRef<const Init *> Range,
const RecTy *EltTy) {
ID.AddInteger(Range.size());
ID.AddPointer(EltTy);
- for (Init *I : Range)
+ for (const Init *I : Range)
ID.AddPointer(I);
}
-ListInit *ListInit::get(ArrayRef<Init *> Range, const RecTy *EltTy) {
+const ListInit *ListInit::get(ArrayRef<const Init *> Range,
+ const RecTy *EltTy) {
FoldingSetNodeID ID;
ProfileListInit(ID, Range, EltTy);
detail::RecordKeeperImpl &RK = EltTy->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (ListInit *I = RK.TheListInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const ListInit *I = RK.TheListInitPool.FindNodeOrInsertPos(ID, IP))
return I;
assert(Range.empty() || !isa<TypedInit>(Range[0]) ||
cast<TypedInit>(Range[0])->getType()->typeIsConvertibleTo(EltTy));
- void *Mem = RK.Allocator.Allocate(totalSizeToAlloc<Init *>(Range.size()),
- alignof(ListInit));
+ void *Mem = RK.Allocator.Allocate(
+ totalSizeToAlloc<const Init *>(Range.size()), alignof(ListInit));
ListInit *I = new (Mem) ListInit(Range.size(), EltTy);
std::uninitialized_copy(Range.begin(), Range.end(),
- I->getTrailingObjects<Init *>());
+ I->getTrailingObjects<const Init *>());
RK.TheListInitPool.InsertNode(I, IP);
return I;
}
@@ -719,20 +721,20 @@ void ListInit::Profile(FoldingSetNodeID &ID) const {
ProfileListInit(ID, getValues(), EltTy);
}
-Init *ListInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *ListInit::convertInitializerTo(const RecTy *Ty) const {
if (getType() == Ty)
return const_cast<ListInit*>(this);
if (auto *LRT = dyn_cast<ListRecTy>(Ty)) {
- SmallVector<Init*, 8> Elements;
+ SmallVector<const Init *, 8> Elements;
Elements.reserve(getValues().size());
// Verify that all of the elements of the list are subclasses of the
// appropriate class!
bool Changed = false;
const RecTy *ElementType = LRT->getElementType();
- for (Init *I : getValues())
- if (Init *CI = I->convertInitializerTo(ElementType)) {
+ for (const Init *I : getValues())
+ if (const Init *CI = I->convertInitializerTo(ElementType)) {
Elements.push_back(CI);
if (CI != I)
Changed = true;
@@ -749,30 +751,30 @@ Init *ListInit::convertInitializerTo(const RecTy *Ty) const {
const Record *ListInit::getElementAsRecord(unsigned i) const {
assert(i < NumValues && "List element index out of range!");
- DefInit *DI = dyn_cast<DefInit>(getElement(i));
+ const DefInit *DI = dyn_cast<DefInit>(getElement(i));
if (!DI)
PrintFatalError("Expected record in list!");
return DI->getDef();
}
-Init *ListInit::resolveReferences(Resolver &R) const {
- SmallVector<Init*, 8> Resolved;
+const Init *ListInit::resolveReferences(Resolver &R) const {
+ SmallVector<const Init *, 8> Resolved;
Resolved.reserve(size());
bool Changed = false;
- for (Init *CurElt : getValues()) {
- Init *E = CurElt->resolveReferences(R);
+ for (const Init *CurElt : getValues()) {
+ const Init *E = CurElt->resolveReferences(R);
Changed |= E != CurElt;
Resolved.push_back(E);
}
if (Changed)
return ListInit::get(Resolved, getElementType());
- return const_cast<ListInit *>(this);
+ return this;
}
bool ListInit::isComplete() const {
- for (Init *Element : *this) {
+ for (const Init *Element : *this) {
if (!Element->isComplete())
return false;
}
@@ -780,7 +782,7 @@ bool ListInit::isComplete() const {
}
bool ListInit::isConcrete() const {
- for (Init *Element : *this) {
+ for (const Init *Element : *this) {
if (!Element->isConcrete())
return false;
}
@@ -790,7 +792,7 @@ bool ListInit::isConcrete() const {
std::string ListInit::getAsString() const {
std::string Result = "[";
const char *sep = "";
- for (Init *Element : *this) {
+ for (const Init *Element : *this) {
Result += sep;
sep = ", ";
Result += Element->getAsString();
@@ -798,26 +800,26 @@ std::string ListInit::getAsString() const {
return Result + "]";
}
-Init *OpInit::getBit(unsigned Bit) const {
+const Init *OpInit::getBit(unsigned Bit) const {
if (getType() == BitRecTy::get(getRecordKeeper()))
return const_cast<OpInit*>(this);
- return VarBitInit::get(const_cast<OpInit*>(this), Bit);
+ return VarBitInit::get(this, Bit);
}
-static void ProfileUnOpInit(FoldingSetNodeID &ID, unsigned Opcode, Init *Op,
- const RecTy *Type) {
+static void ProfileUnOpInit(FoldingSetNodeID &ID, unsigned Opcode,
+ const Init *Op, const RecTy *Type) {
ID.AddInteger(Opcode);
ID.AddPointer(Op);
ID.AddPointer(Type);
}
-UnOpInit *UnOpInit::get(UnaryOp Opc, Init *LHS, const RecTy *Type) {
+const UnOpInit *UnOpInit::get(UnaryOp Opc, const Init *LHS, const RecTy *Type) {
FoldingSetNodeID ID;
ProfileUnOpInit(ID, Opc, LHS, Type);
detail::RecordKeeperImpl &RK = Type->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (UnOpInit *I = RK.TheUnOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const UnOpInit *I = RK.TheUnOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
UnOpInit *I = new (RK.Allocator) UnOpInit(Opc, LHS, Type);
@@ -829,7 +831,7 @@ void UnOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileUnOpInit(ID, getOpcode(), getOperand(), getType());
}
-Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
+const Init *UnOpInit::Fold(const Record *CurRec, bool IsFinal) const {
RecordKeeper &RK = getRecordKeeper();
switch (getOpcode()) {
case REPR:
@@ -851,27 +853,27 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
}
break;
case TOLOWER:
- if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+ if (const StringInit *LHSs = dyn_cast<StringInit>(LHS))
return StringInit::get(RK, LHSs->getValue().lower());
break;
case TOUPPER:
- if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+ if (const StringInit *LHSs = dyn_cast<StringInit>(LHS))
return StringInit::get(RK, LHSs->getValue().upper());
break;
case CAST:
if (isa<StringRecTy>(getType())) {
- if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+ if (const StringInit *LHSs = dyn_cast<StringInit>(LHS))
return LHSs;
- if (DefInit *LHSd = dyn_cast<DefInit>(LHS))
+ if (const DefInit *LHSd = dyn_cast<DefInit>(LHS))
return StringInit::get(RK, LHSd->getAsString());
- if (IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ if (const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(RK))))
return StringInit::get(RK, LHSi->getAsString());
} else if (isa<RecordRecTy>(getType())) {
- if (StringInit *Name = dyn_cast<StringInit>(LHS)) {
+ if (const StringInit *Name = dyn_cast<StringInit>(LHS)) {
const Record *D = RK.getDef(Name->getValue());
if (!D && CurRec) {
// Self-references are allowed, but their resolution is delayed until
@@ -911,25 +913,25 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
}
}
- if (Init *NewInit = LHS->convertInitializerTo(getType()))
+ if (const Init *NewInit = LHS->convertInitializerTo(getType()))
return NewInit;
break;
case NOT:
- if (IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ if (const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(RK))))
return IntInit::get(RK, LHSi->getValue() ? 0 : 1);
break;
case HEAD:
- if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
+ if (const ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
assert(!LHSl->empty() && "Empty list in head");
return LHSl->getElement(0);
}
break;
case TAIL:
- if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
+ if (const ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
assert(!LHSl->empty() && "Empty list in tail");
// Note the +1. We can't just pass the result of getValues()
// directly.
@@ -938,25 +940,25 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
break;
case SIZE:
- if (ListInit *LHSl = dyn_cast<ListInit>(LHS))
+ if (const ListInit *LHSl = dyn_cast<ListInit>(LHS))
return IntInit::get(RK, LHSl->size());
- if (DagInit *LHSd = dyn_cast<DagInit>(LHS))
+ if (const DagInit *LHSd = dyn_cast<DagInit>(LHS))
return IntInit::get(RK, LHSd->arg_size());
- if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+ if (const StringInit *LHSs = dyn_cast<StringInit>(LHS))
return IntInit::get(RK, LHSs->getValue().size());
break;
case EMPTY:
- if (ListInit *LHSl = dyn_cast<ListInit>(LHS))
+ if (const ListInit *LHSl = dyn_cast<ListInit>(LHS))
return IntInit::get(RK, LHSl->empty());
- if (DagInit *LHSd = dyn_cast<DagInit>(LHS))
+ if (const DagInit *LHSd = dyn_cast<DagInit>(LHS))
return IntInit::get(RK, LHSd->arg_empty());
- if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+ if (const StringInit *LHSs = dyn_cast<StringInit>(LHS))
return IntInit::get(RK, LHSs->getValue().empty());
break;
case GETDAGOP:
- if (DagInit *Dag = dyn_cast<DagInit>(LHS)) {
+ if (const DagInit *Dag = dyn_cast<DagInit>(LHS)) {
// TI is not necessarily a def due to the late resolution in multiclasses,
// but has to be a TypedInit.
auto *TI = cast<TypedInit>(Dag->getOperator());
@@ -972,7 +974,7 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
break;
case LOG2:
- if (IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ if (const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(RK)))) {
int64_t LHSv = LHSi->getValue();
if (LHSv <= 0) {
@@ -989,21 +991,22 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
break;
case LISTFLATTEN:
- if (ListInit *LHSList = dyn_cast<ListInit>(LHS)) {
+ if (const ListInit *LHSList = dyn_cast<ListInit>(LHS)) {
const ListRecTy *InnerListTy =
dyn_cast<ListRecTy>(LHSList->getElementType());
// list of non-lists, !listflatten() is a NOP.
if (!InnerListTy)
return LHS;
- auto Flatten = [](ListInit *List) -> std::optional<std::vector<Init *>> {
- std::vector<Init *> Flattened;
+ auto Flatten =
+ [](const ListInit *List) -> std::optional<std::vector<const Init *>> {
+ std::vector<const Init *> Flattened;
// Concatenate elements of all the inner lists.
- for (Init *InnerInit : List->getValues()) {
- ListInit *InnerList = dyn_cast<ListInit>(InnerInit);
+ for (const Init *InnerInit : List->getValues()) {
+ const ListInit *InnerList = dyn_cast<ListInit>(InnerInit);
if (!InnerList)
return std::nullopt;
- for (Init *InnerElem : InnerList->getValues())
+ for (const Init *InnerElem : InnerList->getValues())
Flattened.push_back(InnerElem);
};
return Flattened;
@@ -1018,13 +1021,13 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
return const_cast<UnOpInit *>(this);
}
-Init *UnOpInit::resolveReferences(Resolver &R) const {
- Init *lhs = LHS->resolveReferences(R);
+const Init *UnOpInit::resolveReferences(Resolver &R) const {
+ const Init *lhs = LHS->resolveReferences(R);
if (LHS != lhs || (R.isFinal() && getOpcode() == CAST))
return (UnOpInit::get(getOpcode(), lhs, getType()))
->Fold(R.getCurrentRecord(), R.isFinal());
- return const_cast<UnOpInit *>(this);
+ return this;
}
std::string UnOpInit::getAsString() const {
@@ -1054,22 +1057,23 @@ std::string UnOpInit::getAsString() const {
return Result + "(" + LHS->getAsString() + ")";
}
-static void ProfileBinOpInit(FoldingSetNodeID &ID, unsigned Opcode, Init *LHS,
- Init *RHS, const RecTy *Type) {
+static void ProfileBinOpInit(FoldingSetNodeID &ID, unsigned Opcode,
+ const Init *LHS, const Init *RHS,
+ const RecTy *Type) {
ID.AddInteger(Opcode);
ID.AddPointer(LHS);
ID.AddPointer(RHS);
ID.AddPointer(Type);
}
-BinOpInit *BinOpInit::get(BinaryOp Opc, Init *LHS, Init *RHS,
- const RecTy *Type) {
+const BinOpInit *BinOpInit::get(BinaryOp Opc, const Init *LHS, const Init *RHS,
+ const RecTy *Type) {
FoldingSetNodeID ID;
ProfileBinOpInit(ID, Opc, LHS, RHS, Type);
detail::RecordKeeperImpl &RK = LHS->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (BinOpInit *I = RK.TheBinOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const BinOpInit *I = RK.TheBinOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
BinOpInit *I = new (RK.Allocator) BinOpInit(Opc, LHS, RHS, Type);
@@ -1081,8 +1085,8 @@ void BinOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileBinOpInit(ID, getOpcode(), getLHS(), getRHS(), getType());
}
-static StringInit *ConcatStringInits(const StringInit *I0,
- const StringInit *I1) {
+static const StringInit *ConcatStringInits(const StringInit *I0,
+ const StringInit *I1) {
SmallString<80> Concat(I0->getValue());
Concat.append(I1->getValue());
return StringInit::get(
@@ -1090,11 +1094,11 @@ static StringInit *ConcatStringInits(const StringInit *I0,
StringInit::determineFormat(I0->getFormat(), I1->getFormat()));
}
-static StringInit *interleaveStringList(const ListInit *List,
- const StringInit *Delim) {
+static const StringInit *interleaveStringList(const ListInit *List,
+ const StringInit *Delim) {
if (List->size() == 0)
return StringInit::get(List->getRecordKeeper(), "");
- StringInit *Element = dyn_cast<StringInit>(List->getElement(0));
+ const StringInit *Element = dyn_cast<StringInit>(List->getElement(0));
if (!Element)
return nullptr;
SmallString<80> Result(Element->getValue());
@@ -1102,7 +1106,7 @@ static StringInit *interleaveStringList(const ListInit *List,
for (unsigned I = 1, E = List->size(); I < E; ++I) {
Result.append(Delim->getValue());
- StringInit *Element = dyn_cast<StringInit>(List->getElement(I));
+ const StringInit *Element = dyn_cast<StringInit>(List->getElement(I));
if (!Element)
return nullptr;
Result.append(Element->getValue());
@@ -1111,12 +1115,12 @@ static StringInit *interleaveStringList(const ListInit *List,
return StringInit::get(List->getRecordKeeper(), Result, Fmt);
}
-static StringInit *interleaveIntList(const ListInit *List,
- const StringInit *Delim) {
+static const StringInit *interleaveIntList(const ListInit *List,
+ const StringInit *Delim) {
RecordKeeper &RK = List->getRecordKeeper();
if (List->size() == 0)
return StringInit::get(RK, "");
- IntInit *Element = dyn_cast_or_null<IntInit>(
+ const IntInit *Element = dyn_cast_or_null<IntInit>(
List->getElement(0)->convertInitializerTo(IntRecTy::get(RK)));
if (!Element)
return nullptr;
@@ -1124,7 +1128,7 @@ static StringInit *interleaveIntList(const ListInit *List,
for (unsigned I = 1, E = List->size(); I < E; ++I) {
Result.append(Delim->getValue());
- IntInit *Element = dyn_cast_or_null<IntInit>(
+ const IntInit *Element = dyn_cast_or_null<IntInit>(
List->getElement(I)->convertInitializerTo(IntRecTy::get(RK)));
if (!Element)
return nullptr;
@@ -1133,7 +1137,7 @@ static StringInit *interleaveIntList(const ListInit *List,
return StringInit::get(RK, Result);
}
-Init *BinOpInit::getStrConcat(Init *I0, Init *I1) {
+const Init *BinOpInit::getStrConcat(const Init *I0, const Init *I1) {
// Shortcut for the common case of concatenating two strings.
if (const StringInit *I0s = dyn_cast<StringInit>(I0))
if (const StringInit *I1s = dyn_cast<StringInit>(I1))
@@ -1142,15 +1146,15 @@ Init *BinOpInit::getStrConcat(Init *I0, Init *I1) {
StringRecTy::get(I0->getRecordKeeper()));
}
-static ListInit *ConcatListInits(const ListInit *LHS,
- const ListInit *RHS) {
- SmallVector<Init *, 8> Args;
+static const ListInit *ConcatListInits(const ListInit *LHS,
+ const ListInit *RHS) {
+ SmallVector<const Init *, 8> Args;
llvm::append_range(Args, *LHS);
llvm::append_range(Args, *RHS);
return ListInit::get(Args, LHS->getElementType());
}
-Init *BinOpInit::getListConcat(TypedInit *LHS, Init *RHS) {
+const Init *BinOpInit::getListConcat(const TypedInit *LHS, const Init *RHS) {
assert(isa<ListRecTy>(LHS->getType()) && "First arg must be a list");
// Shortcut for the common case of concatenating two lists.
@@ -1160,12 +1164,12 @@ Init *BinOpInit::getListConcat(TypedInit *LHS, Init *RHS) {
return BinOpInit::get(BinOpInit::LISTCONCAT, LHS, RHS, LHS->getType());
}
-std::optional<bool> BinOpInit::CompareInit(unsigned Opc, Init *LHS,
- Init *RHS) const {
+std::optional<bool> BinOpInit::CompareInit(unsigned Opc, const Init *LHS,
+ const Init *RHS) const {
// First see if we have two bit, bits, or int.
- IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(getRecordKeeper())));
- IntInit *RHSi = dyn_cast_or_null<IntInit>(
+ const IntInit *RHSi = dyn_cast_or_null<IntInit>(
RHS->convertInitializerTo(IntRecTy::get(getRecordKeeper())));
if (LHSi && RHSi) {
@@ -1196,8 +1200,8 @@ std::optional<bool> BinOpInit::CompareInit(unsigned Opc, Init *LHS,
}
// Next try strings.
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
- StringInit *RHSs = dyn_cast<StringInit>(RHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const StringInit *RHSs = dyn_cast<StringInit>(RHS);
if (LHSs && RHSs) {
bool Result;
@@ -1228,8 +1232,8 @@ std::optional<bool> BinOpInit::CompareInit(unsigned Opc, Init *LHS,
// Finally, !eq and !ne can be used with records.
if (Opc == EQ || Opc == NE) {
- DefInit *LHSd = dyn_cast<DefInit>(LHS);
- DefInit *RHSd = dyn_cast<DefInit>(RHS);
+ const DefInit *LHSd = dyn_cast<DefInit>(LHS);
+ const DefInit *RHSd = dyn_cast<DefInit>(RHS);
if (LHSd && RHSd)
return (Opc == EQ) ? LHSd == RHSd : LHSd != RHSd;
}
@@ -1237,10 +1241,10 @@ std::optional<bool> BinOpInit::CompareInit(unsigned Opc, Init *LHS,
return std::nullopt;
}
-static std::optional<unsigned> getDagArgNoByKey(DagInit *Dag, Init *Key,
- std::string &Error) {
+static std::optional<unsigned>
+getDagArgNoByKey(const DagInit *Dag, const Init *Key, std::string &Error) {
// Accessor by index
- if (IntInit *Idx = dyn_cast<IntInit>(Key)) {
+ if (const IntInit *Idx = dyn_cast<IntInit>(Key)) {
int64_t Pos = Idx->getValue();
if (Pos < 0) {
// The index is negative.
@@ -1260,7 +1264,7 @@ static std::optional<unsigned> getDagArgNoByKey(DagInit *Dag, Init *Key,
}
assert(isa<StringInit>(Key));
// Accessor by name
- StringInit *Name = dyn_cast<StringInit>(Key);
+ const StringInit *Name = dyn_cast<StringInit>(Key);
auto ArgNo = Dag->getArgNo(Name->getValue());
if (!ArgNo) {
// The key is not found.
@@ -1270,14 +1274,14 @@ static std::optional<unsigned> getDagArgNoByKey(DagInit *Dag, Init *Key,
return *ArgNo;
}
-Init *BinOpInit::Fold(Record *CurRec) const {
+const Init *BinOpInit::Fold(const Record *CurRec) const {
switch (getOpcode()) {
case CONCAT: {
- DagInit *LHSs = dyn_cast<DagInit>(LHS);
- DagInit *RHSs = dyn_cast<DagInit>(RHS);
+ const DagInit *LHSs = dyn_cast<DagInit>(LHS);
+ const DagInit *RHSs = dyn_cast<DagInit>(RHS);
if (LHSs && RHSs) {
- DefInit *LOp = dyn_cast<DefInit>(LHSs->getOperator());
- DefInit *ROp = dyn_cast<DefInit>(RHSs->getOperator());
+ const DefInit *LOp = dyn_cast<DefInit>(LHSs->getOperator());
+ const DefInit *ROp = dyn_cast<DefInit>(RHSs->getOperator());
if ((!LOp && !isa<UnsetInit>(LHSs->getOperator())) ||
(!ROp && !isa<UnsetInit>(RHSs->getOperator())))
break;
@@ -1286,12 +1290,12 @@ Init *BinOpInit::Fold(Record *CurRec) const {
LHSs->getAsString() + "' vs. '" + RHSs->getAsString() +
"'");
}
- Init *Op = LOp ? LOp : ROp;
+ const Init *Op = LOp ? LOp : ROp;
if (!Op)
Op = UnsetInit::get(getRecordKeeper());
- SmallVector<Init*, 8> Args;
- SmallVector<StringInit*, 8> ArgNames;
+ SmallVector<const Init *, 8> Args;
+ SmallVector<const StringInit *, 8> ArgNames;
for (unsigned i = 0, e = LHSs->getNumArgs(); i != e; ++i) {
Args.push_back(LHSs->getArg(i));
ArgNames.push_back(LHSs->getArgName(i));
@@ -1305,10 +1309,10 @@ Init *BinOpInit::Fold(Record *CurRec) const {
break;
}
case LISTCONCAT: {
- ListInit *LHSs = dyn_cast<ListInit>(LHS);
- ListInit *RHSs = dyn_cast<ListInit>(RHS);
+ const ListInit *LHSs = dyn_cast<ListInit>(LHS);
+ const ListInit *RHSs = dyn_cast<ListInit>(RHS);
if (LHSs && RHSs) {
- SmallVector<Init *, 8> Args;
+ SmallVector<const Init *, 8> Args;
llvm::append_range(Args, *LHSs);
llvm::append_range(Args, *RHSs);
return ListInit::get(Args, LHSs->getElementType());
@@ -1316,22 +1320,22 @@ Init *BinOpInit::Fold(Record *CurRec) const {
break;
}
case LISTSPLAT: {
- TypedInit *Value = dyn_cast<TypedInit>(LHS);
- IntInit *Size = dyn_cast<IntInit>(RHS);
+ const TypedInit *Value = dyn_cast<TypedInit>(LHS);
+ const IntInit *Size = dyn_cast<IntInit>(RHS);
if (Value && Size) {
- SmallVector<Init *, 8> Args(Size->getValue(), Value);
+ SmallVector<const Init *, 8> Args(Size->getValue(), Value);
return ListInit::get(Args, Value->getType());
}
break;
}
case LISTREMOVE: {
- ListInit *LHSs = dyn_cast<ListInit>(LHS);
- ListInit *RHSs = dyn_cast<ListInit>(RHS);
+ const ListInit *LHSs = dyn_cast<ListInit>(LHS);
+ const ListInit *RHSs = dyn_cast<ListInit>(RHS);
if (LHSs && RHSs) {
- SmallVector<Init *, 8> Args;
- for (Init *EltLHS : *LHSs) {
+ SmallVector<const Init *, 8> Args;
+ for (const Init *EltLHS : *LHSs) {
bool Found = false;
- for (Init *EltRHS : *RHSs) {
+ for (const Init *EltRHS : *RHSs) {
if (std::optional<bool> Result = CompareInit(EQ, EltLHS, EltRHS)) {
if (*Result) {
Found = true;
@@ -1361,7 +1365,7 @@ Init *BinOpInit::Fold(Record *CurRec) const {
auto *SliceIdxs = dyn_cast<ListInit>(RHS);
if (!TheList || !SliceIdxs)
break;
- SmallVector<Init *, 8> Args;
+ SmallVector<const Init *, 8> Args;
Args.reserve(SliceIdxs->size());
for (auto *I : *SliceIdxs) {
auto *II = dyn_cast<IntInit>(I);
@@ -1382,7 +1386,7 @@ Init *BinOpInit::Fold(Record *CurRec) const {
auto Start = LHSi->getValue();
auto End = RHSi->getValue();
- SmallVector<Init *, 8> Args;
+ SmallVector<const Init *, 8> Args;
if (getOpcode() == RANGEC) {
// Closed interval
if (Start <= End) {
@@ -1407,17 +1411,17 @@ Init *BinOpInit::Fold(Record *CurRec) const {
return ListInit::get(Args, LHSi->getType());
}
case STRCONCAT: {
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
- StringInit *RHSs = dyn_cast<StringInit>(RHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const StringInit *RHSs = dyn_cast<StringInit>(RHS);
if (LHSs && RHSs)
return ConcatStringInits(LHSs, RHSs);
break;
}
case INTERLEAVE: {
- ListInit *List = dyn_cast<ListInit>(LHS);
- StringInit *Delim = dyn_cast<StringInit>(RHS);
+ const ListInit *List = dyn_cast<ListInit>(LHS);
+ const StringInit *Delim = dyn_cast<StringInit>(RHS);
if (List && Delim) {
- StringInit *Result;
+ const StringInit *Result;
if (isa<StringRecTy>(List->getElementType()))
Result = interleaveStringList(List, Delim);
else
@@ -1438,7 +1442,7 @@ Init *BinOpInit::Fold(Record *CurRec) const {
break;
}
case GETDAGARG: {
- DagInit *Dag = dyn_cast<DagInit>(LHS);
+ const DagInit *Dag = dyn_cast<DagInit>(LHS);
if (Dag && isa<IntInit, StringInit>(RHS)) {
std::string Error;
auto ArgNo = getDagArgNoByKey(Dag, RHS, Error);
@@ -1447,7 +1451,7 @@ Init *BinOpInit::Fold(Record *CurRec) const {
assert(*ArgNo < Dag->getNumArgs());
- Init *Arg = Dag->getArg(*ArgNo);
+ const Init *Arg = Dag->getArg(*ArgNo);
if (auto *TI = dyn_cast<TypedInit>(Arg))
if (!TI->getType()->typeIsConvertibleTo(getType()))
return UnsetInit::get(Dag->getRecordKeeper());
@@ -1456,8 +1460,8 @@ Init *BinOpInit::Fold(Record *CurRec) const {
break;
}
case GETDAGNAME: {
- DagInit *Dag = dyn_cast<DagInit>(LHS);
- IntInit *Idx = dyn_cast<IntInit>(RHS);
+ const DagInit *Dag = dyn_cast<DagInit>(LHS);
+ const IntInit *Idx = dyn_cast<IntInit>(RHS);
if (Dag && Idx) {
int64_t Pos = Idx->getValue();
if (Pos < 0 || Pos >= Dag->getNumArgs()) {
@@ -1467,7 +1471,7 @@ Init *BinOpInit::Fold(Record *CurRec) const {
std::to_string(Dag->getNumArgs() - 1) + ": " +
std::to_string(Pos));
}
- Init *ArgName = Dag->getArgName(Pos);
+ const Init *ArgName = Dag->getArgName(Pos);
if (!ArgName)
return UnsetInit::get(getRecordKeeper());
return ArgName;
@@ -1475,11 +1479,11 @@ Init *BinOpInit::Fold(Record *CurRec) const {
break;
}
case SETDAGOP: {
- DagInit *Dag = dyn_cast<DagInit>(LHS);
- DefInit *Op = dyn_cast<DefInit>(RHS);
+ const DagInit *Dag = dyn_cast<DagInit>(LHS);
+ const DefInit *Op = dyn_cast<DefInit>(RHS);
if (Dag && Op) {
- SmallVector<Init*, 8> Args;
- SmallVector<StringInit*, 8> ArgNames;
+ SmallVector<const Init *, 8> Args;
+ SmallVector<const StringInit *, 8> ArgNames;
for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
Args.push_back(Dag->getArg(i));
ArgNames.push_back(Dag->getArgName(i));
@@ -1498,9 +1502,9 @@ Init *BinOpInit::Fold(Record *CurRec) const {
case SHL:
case SRA:
case SRL: {
- IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(getRecordKeeper())));
- IntInit *RHSi = dyn_cast_or_null<IntInit>(
+ const IntInit *RHSi = dyn_cast_or_null<IntInit>(
RHS->convertInitializerTo(IntRecTy::get(getRecordKeeper())));
if (LHSi && RHSi) {
int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue();
@@ -1533,17 +1537,17 @@ Init *BinOpInit::Fold(Record *CurRec) const {
}
}
unresolved:
- return const_cast<BinOpInit *>(this);
+ return this;
}
-Init *BinOpInit::resolveReferences(Resolver &R) const {
- Init *lhs = LHS->resolveReferences(R);
- Init *rhs = RHS->resolveReferences(R);
+const Init *BinOpInit::resolveReferences(Resolver &R) const {
+ const Init *lhs = LHS->resolveReferences(R);
+ const Init *rhs = RHS->resolveReferences(R);
if (LHS != lhs || RHS != rhs)
return (BinOpInit::get(getOpcode(), lhs, rhs, getType()))
->Fold(R.getCurrentRecord());
- return const_cast<BinOpInit *>(this);
+ return this;
}
std::string BinOpInit::getAsString() const {
@@ -1589,8 +1593,9 @@ std::string BinOpInit::getAsString() const {
return Result + "(" + LHS->getAsString() + ", " + RHS->getAsString() + ")";
}
-static void ProfileTernOpInit(FoldingSetNodeID &ID, unsigned Opcode, Init *LHS,
- Init *MHS, Init *RHS, const RecTy *Type) {
+static void ProfileTernOpInit(FoldingSetNodeID &ID, unsigned Opcode,
+ const Init *LHS, const Init *MHS, const Init *RHS,
+ const RecTy *Type) {
ID.AddInteger(Opcode);
ID.AddPointer(LHS);
ID.AddPointer(MHS);
@@ -1598,8 +1603,9 @@ static void ProfileTernOpInit(FoldingSetNodeID &ID, unsigned Opcode, Init *LHS,
ID.AddPointer(Type);
}
-TernOpInit *TernOpInit::get(TernaryOp Opc, Init *LHS, Init *MHS, Init *RHS,
- const RecTy *Type) {
+const TernOpInit *TernOpInit::get(TernaryOp Opc, const Init *LHS,
+ const Init *MHS, const Init *RHS,
+ const RecTy *Type) {
FoldingSetNodeID ID;
ProfileTernOpInit(ID, Opc, LHS, MHS, RHS, Type);
@@ -1617,26 +1623,27 @@ void TernOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileTernOpInit(ID, getOpcode(), getLHS(), getMHS(), getRHS(), getType());
}
-static Init *ItemApply(Init *LHS, Init *MHSe, Init *RHS, Record *CurRec) {
+static const Init *ItemApply(const Init *LHS, const Init *MHSe, const Init *RHS,
+ const Record *CurRec) {
MapResolver R(CurRec);
R.set(LHS, MHSe);
return RHS->resolveReferences(R);
}
-static Init *ForeachDagApply(Init *LHS, DagInit *MHSd, Init *RHS,
- Record *CurRec) {
+static const Init *ForeachDagApply(const Init *LHS, const DagInit *MHSd,
+ const Init *RHS, const Record *CurRec) {
bool Change = false;
- Init *Val = ItemApply(LHS, MHSd->getOperator(), RHS, CurRec);
+ const Init *Val = ItemApply(LHS, MHSd->getOperator(), RHS, CurRec);
if (Val != MHSd->getOperator())
Change = true;
- SmallVector<std::pair<Init *, StringInit *>, 8> NewArgs;
+ SmallVector<std::pair<const Init *, const StringInit *>, 8> NewArgs;
for (unsigned int i = 0; i < MHSd->getNumArgs(); ++i) {
- Init *Arg = MHSd->getArg(i);
- Init *NewArg;
- StringInit *ArgName = MHSd->getArgName(i);
+ const Init *Arg = MHSd->getArg(i);
+ const Init *NewArg;
+ const StringInit *ArgName = MHSd->getArgName(i);
- if (DagInit *Argd = dyn_cast<DagInit>(Arg))
+ if (const DagInit *Argd = dyn_cast<DagInit>(Arg))
NewArg = ForeachDagApply(LHS, Argd, RHS, CurRec);
else
NewArg = ItemApply(LHS, Arg, RHS, CurRec);
@@ -1652,16 +1659,17 @@ static Init *ForeachDagApply(Init *LHS, DagInit *MHSd, Init *RHS,
}
// Applies RHS to all elements of MHS, using LHS as a temp variable.
-static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, const RecTy *Type,
- Record *CurRec) {
- if (DagInit *MHSd = dyn_cast<DagInit>(MHS))
+static const Init *ForeachHelper(const Init *LHS, const Init *MHS,
+ const Init *RHS, const RecTy *Type,
+ const Record *CurRec) {
+ if (const DagInit *MHSd = dyn_cast<DagInit>(MHS))
return ForeachDagApply(LHS, MHSd, RHS, CurRec);
- if (ListInit *MHSl = dyn_cast<ListInit>(MHS)) {
- SmallVector<Init *, 8> NewList(MHSl->begin(), MHSl->end());
+ if (const ListInit *MHSl = dyn_cast<ListInit>(MHS)) {
+ SmallVector<const Init *, 8> NewList(MHSl->begin(), MHSl->end());
- for (Init *&Item : NewList) {
- Init *NewItem = ItemApply(LHS, Item, RHS, CurRec);
+ for (const Init *&Item : NewList) {
+ const Init *NewItem = ItemApply(LHS, Item, RHS, CurRec);
if (NewItem != Item)
Item = NewItem;
}
@@ -1673,16 +1681,17 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, const RecTy *Type,
// Evaluates RHS for all elements of MHS, using LHS as a temp variable.
// Creates a new list with the elements that evaluated to true.
-static Init *FilterHelper(Init *LHS, Init *MHS, Init *RHS, const RecTy *Type,
- Record *CurRec) {
- if (ListInit *MHSl = dyn_cast<ListInit>(MHS)) {
- SmallVector<Init *, 8> NewList;
-
- for (Init *Item : MHSl->getValues()) {
- Init *Include = ItemApply(LHS, Item, RHS, CurRec);
+static const Init *FilterHelper(const Init *LHS, const Init *MHS,
+ const Init *RHS, const RecTy *Type,
+ const Record *CurRec) {
+ if (const ListInit *MHSl = dyn_cast<ListInit>(MHS)) {
+ SmallVector<const Init *, 8> NewList;
+
+ for (const Init *Item : MHSl->getValues()) {
+ const Init *Include = ItemApply(LHS, Item, RHS, CurRec);
if (!Include)
return nullptr;
- if (IntInit *IncludeInt =
+ if (const IntInit *IncludeInt =
dyn_cast_or_null<IntInit>(Include->convertInitializerTo(
IntRecTy::get(LHS->getRecordKeeper())))) {
if (IncludeInt->getValue())
@@ -1697,21 +1706,21 @@ static Init *FilterHelper(Init *LHS, Init *MHS, Init *RHS, const RecTy *Type,
return nullptr;
}
-Init *TernOpInit::Fold(Record *CurRec) const {
+const Init *TernOpInit::Fold(const Record *CurRec) const {
RecordKeeper &RK = getRecordKeeper();
switch (getOpcode()) {
case SUBST: {
- DefInit *LHSd = dyn_cast<DefInit>(LHS);
- VarInit *LHSv = dyn_cast<VarInit>(LHS);
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const DefInit *LHSd = dyn_cast<DefInit>(LHS);
+ const VarInit *LHSv = dyn_cast<VarInit>(LHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
- DefInit *MHSd = dyn_cast<DefInit>(MHS);
- VarInit *MHSv = dyn_cast<VarInit>(MHS);
- StringInit *MHSs = dyn_cast<StringInit>(MHS);
+ const DefInit *MHSd = dyn_cast<DefInit>(MHS);
+ const VarInit *MHSv = dyn_cast<VarInit>(MHS);
+ const StringInit *MHSs = dyn_cast<StringInit>(MHS);
- DefInit *RHSd = dyn_cast<DefInit>(RHS);
- VarInit *RHSv = dyn_cast<VarInit>(RHS);
- StringInit *RHSs = dyn_cast<StringInit>(RHS);
+ const DefInit *RHSd = dyn_cast<DefInit>(RHS);
+ const VarInit *RHSv = dyn_cast<VarInit>(RHS);
+ const StringInit *RHSs = dyn_cast<StringInit>(RHS);
if (LHSd && MHSd && RHSd) {
const Record *Val = RHSd->getDef();
@@ -1745,19 +1754,19 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case FOREACH: {
- if (Init *Result = ForeachHelper(LHS, MHS, RHS, getType(), CurRec))
+ if (const Init *Result = ForeachHelper(LHS, MHS, RHS, getType(), CurRec))
return Result;
break;
}
case FILTER: {
- if (Init *Result = FilterHelper(LHS, MHS, RHS, getType(), CurRec))
+ if (const Init *Result = FilterHelper(LHS, MHS, RHS, getType(), CurRec))
return Result;
break;
}
case IF: {
- if (IntInit *LHSi = dyn_cast_or_null<IntInit>(
+ if (const IntInit *LHSi = dyn_cast_or_null<IntInit>(
LHS->convertInitializerTo(IntRecTy::get(RK)))) {
if (LHSi->getValue())
return MHS;
@@ -1767,8 +1776,8 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case DAG: {
- ListInit *MHSl = dyn_cast<ListInit>(MHS);
- ListInit *RHSl = dyn_cast<ListInit>(RHS);
+ const ListInit *MHSl = dyn_cast<ListInit>(MHS);
+ const ListInit *RHSl = dyn_cast<ListInit>(RHS);
bool MHSok = MHSl || isa<UnsetInit>(MHS);
bool RHSok = RHSl || isa<UnsetInit>(RHS);
@@ -1776,11 +1785,11 @@ Init *TernOpInit::Fold(Record *CurRec) const {
break; // Typically prevented by the parser, but might happen with template args
if (MHSok && RHSok && (!MHSl || !RHSl || MHSl->size() == RHSl->size())) {
- SmallVector<std::pair<Init *, StringInit *>, 8> Children;
+ SmallVector<std::pair<const Init *, const StringInit *>, 8> Children;
unsigned Size = MHSl ? MHSl->size() : RHSl->size();
for (unsigned i = 0; i != Size; ++i) {
- Init *Node = MHSl ? MHSl->getElement(i) : UnsetInit::get(RK);
- Init *Name = RHSl ? RHSl->getElement(i) : UnsetInit::get(RK);
+ const Init *Node = MHSl ? MHSl->getElement(i) : UnsetInit::get(RK);
+ const Init *Name = RHSl ? RHSl->getElement(i) : UnsetInit::get(RK);
if (!isa<StringInit>(Name) && !isa<UnsetInit>(Name))
return const_cast<TernOpInit *>(this);
Children.emplace_back(Node, dyn_cast<StringInit>(Name));
@@ -1803,7 +1812,7 @@ Init *TernOpInit::Fold(Record *CurRec) const {
if (Step == 0)
PrintError(CurRec->getLoc(), "Step of !range can't be 0");
- SmallVector<Init *, 8> Args;
+ SmallVector<const Init *, 8> Args;
if (Start < End && Step > 0) {
Args.reserve((End - Start) / Step);
for (auto I = Start; I < End; I += Step)
@@ -1819,9 +1828,9 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case SUBSTR: {
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
- IntInit *MHSi = dyn_cast<IntInit>(MHS);
- IntInit *RHSi = dyn_cast<IntInit>(RHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const IntInit *MHSi = dyn_cast<IntInit>(MHS);
+ const IntInit *RHSi = dyn_cast<IntInit>(RHS);
if (LHSs && MHSi && RHSi) {
int64_t StringSize = LHSs->getValue().size();
int64_t Start = MHSi->getValue();
@@ -1840,9 +1849,9 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case FIND: {
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
- StringInit *MHSs = dyn_cast<StringInit>(MHS);
- IntInit *RHSi = dyn_cast<IntInit>(RHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const StringInit *MHSs = dyn_cast<StringInit>(MHS);
+ const IntInit *RHSi = dyn_cast<IntInit>(RHS);
if (LHSs && MHSs && RHSi) {
int64_t SourceSize = LHSs->getValue().size();
int64_t Start = RHSi->getValue();
@@ -1860,7 +1869,7 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case SETDAGARG: {
- DagInit *Dag = dyn_cast<DagInit>(LHS);
+ const DagInit *Dag = dyn_cast<DagInit>(LHS);
if (Dag && isa<IntInit, StringInit>(MHS)) {
std::string Error;
auto ArgNo = getDagArgNoByKey(Dag, MHS, Error);
@@ -1869,8 +1878,8 @@ Init *TernOpInit::Fold(Record *CurRec) const {
assert(*ArgNo < Dag->getNumArgs());
- SmallVector<Init *, 8> Args(Dag->getArgs());
- SmallVector<StringInit *, 8> Names(Dag->getArgNames());
+ SmallVector<const Init *, 8> Args(Dag->getArgs());
+ SmallVector<const StringInit *, 8> Names(Dag->getArgNames());
Args[*ArgNo] = RHS;
return DagInit::get(Dag->getOperator(), Dag->getName(), Args, Names);
}
@@ -1878,7 +1887,7 @@ Init *TernOpInit::Fold(Record *CurRec) const {
}
case SETDAGNAME: {
- DagInit *Dag = dyn_cast<DagInit>(LHS);
+ const DagInit *Dag = dyn_cast<DagInit>(LHS);
if (Dag && isa<IntInit, StringInit>(MHS)) {
std::string Error;
auto ArgNo = getDagArgNoByKey(Dag, MHS, Error);
@@ -1887,8 +1896,8 @@ Init *TernOpInit::Fold(Record *CurRec) const {
assert(*ArgNo < Dag->getNumArgs());
- SmallVector<Init *, 8> Args(Dag->getArgs());
- SmallVector<StringInit *, 8> Names(Dag->getArgNames());
+ SmallVector<const Init *, 8> Args(Dag->getArgs());
+ SmallVector<const StringInit *, 8> Names(Dag->getArgNames());
Names[*ArgNo] = dyn_cast<StringInit>(RHS);
return DagInit::get(Dag->getOperator(), Dag->getName(), Args, Names);
}
@@ -1899,11 +1908,11 @@ Init *TernOpInit::Fold(Record *CurRec) const {
return const_cast<TernOpInit *>(this);
}
-Init *TernOpInit::resolveReferences(Resolver &R) const {
- Init *lhs = LHS->resolveReferences(R);
+const Init *TernOpInit::resolveReferences(Resolver &R) const {
+ const Init *lhs = LHS->resolveReferences(R);
if (getOpcode() == IF && lhs != LHS) {
- if (IntInit *Value = dyn_cast_or_null<IntInit>(
+ if (const IntInit *Value = dyn_cast_or_null<IntInit>(
lhs->convertInitializerTo(IntRecTy::get(getRecordKeeper())))) {
// Short-circuit
if (Value->getValue())
@@ -1912,8 +1921,8 @@ Init *TernOpInit::resolveReferences(Resolver &R) const {
}
}
- Init *mhs = MHS->resolveReferences(R);
- Init *rhs;
+ const Init *mhs = MHS->resolveReferences(R);
+ const Init *rhs;
if (getOpcode() == FOREACH || getOpcode() == FILTER) {
ShadowResolver SR(R);
@@ -1926,7 +1935,7 @@ Init *TernOpInit::resolveReferences(Resolver &R) const {
if (LHS != lhs || MHS != mhs || RHS != rhs)
return (TernOpInit::get(getOpcode(), lhs, mhs, rhs, getType()))
->Fold(R.getCurrentRecord());
- return const_cast<TernOpInit *>(this);
+ return this;
}
std::string TernOpInit::getAsString() const {
@@ -1955,8 +1964,9 @@ std::string TernOpInit::getAsString() const {
", " + MHS->getAsString() + ", " + RHS->getAsString() + ")");
}
-static void ProfileFoldOpInit(FoldingSetNodeID &ID, Init *Start, Init *List,
- Init *A, Init *B, Init *Expr, const RecTy *Type) {
+static void ProfileFoldOpInit(FoldingSetNodeID &ID, const Init *Start,
+ const Init *List, const Init *A, const Init *B,
+ const Init *Expr, const RecTy *Type) {
ID.AddPointer(Start);
ID.AddPointer(List);
ID.AddPointer(A);
@@ -1965,14 +1975,15 @@ static void ProfileFoldOpInit(FoldingSetNodeID &ID, Init *Start, Init *List,
ID.AddPointer(Type);
}
-FoldOpInit *FoldOpInit::get(Init *Start, Init *List, Init *A, Init *B,
- Init *Expr, const RecTy *Type) {
+const FoldOpInit *FoldOpInit::get(const Init *Start, const Init *List,
+ const Init *A, const Init *B,
+ const Init *Expr, const RecTy *Type) {
FoldingSetNodeID ID;
ProfileFoldOpInit(ID, Start, List, A, B, Expr, Type);
detail::RecordKeeperImpl &RK = Start->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (FoldOpInit *I = RK.TheFoldOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const FoldOpInit *I = RK.TheFoldOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
FoldOpInit *I = new (RK.Allocator) FoldOpInit(Start, List, A, B, Expr, Type);
@@ -1984,10 +1995,10 @@ void FoldOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileFoldOpInit(ID, Start, List, A, B, Expr, getType());
}
-Init *FoldOpInit::Fold(Record *CurRec) const {
- if (ListInit *LI = dyn_cast<ListInit>(List)) {
- Init *Accum = Start;
- for (Init *Elt : *LI) {
+const Init *FoldOpInit::Fold(const Record *CurRec) const {
+ if (const ListInit *LI = dyn_cast<ListInit>(List)) {
+ const Init *Accum = Start;
+ for (const Init *Elt : *LI) {
MapResolver R(CurRec);
R.set(A, Accum);
R.set(B, Elt);
@@ -1995,25 +2006,25 @@ Init *FoldOpInit::Fold(Record *CurRec) const {
}
return Accum;
}
- return const_cast<FoldOpInit *>(this);
+ return this;
}
-Init *FoldOpInit::resolveReferences(Resolver &R) const {
- Init *NewStart = Start->resolveReferences(R);
- Init *NewList = List->resolveReferences(R);
+const Init *FoldOpInit::resolveReferences(Resolver &R) const {
+ const Init *NewStart = Start->resolveReferences(R);
+ const Init *NewList = List->resolveReferences(R);
ShadowResolver SR(R);
SR.addShadow(A);
SR.addShadow(B);
- Init *NewExpr = Expr->resolveReferences(SR);
+ const Init *NewExpr = Expr->resolveReferences(SR);
if (Start == NewStart && List == NewList && Expr == NewExpr)
- return const_cast<FoldOpInit *>(this);
+ return this;
return get(NewStart, NewList, A, B, NewExpr, getType())
->Fold(R.getCurrentRecord());
}
-Init *FoldOpInit::getBit(unsigned Bit) const {
+const Init *FoldOpInit::getBit(unsigned Bit) const {
return VarBitInit::get(const_cast<FoldOpInit *>(this), Bit);
}
@@ -2025,19 +2036,19 @@ std::string FoldOpInit::getAsString() const {
}
static void ProfileIsAOpInit(FoldingSetNodeID &ID, const RecTy *CheckType,
- Init *Expr) {
+ const Init *Expr) {
ID.AddPointer(CheckType);
ID.AddPointer(Expr);
}
-IsAOpInit *IsAOpInit::get(const RecTy *CheckType, Init *Expr) {
+const IsAOpInit *IsAOpInit::get(const RecTy *CheckType, const Init *Expr) {
FoldingSetNodeID ID;
ProfileIsAOpInit(ID, CheckType, Expr);
detail::RecordKeeperImpl &RK = Expr->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (IsAOpInit *I = RK.TheIsAOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const IsAOpInit *I = RK.TheIsAOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
IsAOpInit *I = new (RK.Allocator) IsAOpInit(CheckType, Expr);
@@ -2049,8 +2060,8 @@ void IsAOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileIsAOpInit(ID, CheckType, Expr);
}
-Init *IsAOpInit::Fold() const {
- if (TypedInit *TI = dyn_cast<TypedInit>(Expr)) {
+const Init *IsAOpInit::Fold() const {
+ if (const TypedInit *TI = dyn_cast<TypedInit>(Expr)) {
// Is the expression type known to be (a subclass of) the desired type?
if (TI->getType()->typeIsConvertibleTo(CheckType))
return IntInit::get(getRecordKeeper(), 1);
@@ -2066,17 +2077,17 @@ Init *IsAOpInit::Fold() const {
return IntInit::get(getRecordKeeper(), 0);
}
}
- return const_cast<IsAOpInit *>(this);
+ return this;
}
-Init *IsAOpInit::resolveReferences(Resolver &R) const {
- Init *NewExpr = Expr->resolveReferences(R);
+const Init *IsAOpInit::resolveReferences(Resolver &R) const {
+ const Init *NewExpr = Expr->resolveReferences(R);
if (Expr != NewExpr)
return get(CheckType, NewExpr)->Fold();
- return const_cast<IsAOpInit *>(this);
+ return this;
}
-Init *IsAOpInit::getBit(unsigned Bit) const {
+const Init *IsAOpInit::getBit(unsigned Bit) const {
return VarBitInit::get(const_cast<IsAOpInit *>(this), Bit);
}
@@ -2087,18 +2098,20 @@ std::string IsAOpInit::getAsString() const {
}
static void ProfileExistsOpInit(FoldingSetNodeID &ID, const RecTy *CheckType,
- Init *Expr) {
+ const Init *Expr) {
ID.AddPointer(CheckType);
ID.AddPointer(Expr);
}
-ExistsOpInit *ExistsOpInit::get(const RecTy *CheckType, Init *Expr) {
+const ExistsOpInit *ExistsOpInit::get(const RecTy *CheckType,
+ const Init *Expr) {
FoldingSetNodeID ID;
ProfileExistsOpInit(ID, CheckType, Expr);
detail::RecordKeeperImpl &RK = Expr->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (ExistsOpInit *I = RK.TheExistsOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const ExistsOpInit *I =
+ RK.TheExistsOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
ExistsOpInit *I = new (RK.Allocator) ExistsOpInit(CheckType, Expr);
@@ -2110,9 +2123,8 @@ void ExistsOpInit::Profile(FoldingSetNodeID &ID) const {
ProfileExistsOpInit(ID, CheckType, Expr);
}
-Init *ExistsOpInit::Fold(Record *CurRec, bool IsFinal) const {
- if (StringInit *Name = dyn_cast<StringInit>(Expr)) {
-
+const Init *ExistsOpInit::Fold(const Record *CurRec, bool IsFinal) const {
+ if (const StringInit *Name = dyn_cast<StringInit>(Expr)) {
// Look up all defined records to see if we can find one.
const Record *D = CheckType->getRecordKeeper().getDef(Name->getValue());
if (D) {
@@ -2139,19 +2151,18 @@ Init *ExistsOpInit::Fold(Record *CurRec, bool IsFinal) const {
if (IsFinal)
return IntInit::get(getRecordKeeper(), 0);
- return const_cast<ExistsOpInit *>(this);
}
- return const_cast<ExistsOpInit *>(this);
+ return this;
}
-Init *ExistsOpInit::resolveReferences(Resolver &R) const {
- Init *NewExpr = Expr->resolveReferences(R);
+const Init *ExistsOpInit::resolveReferences(Resolver &R) const {
+ const Init *NewExpr = Expr->resolveReferences(R);
if (Expr != NewExpr || R.isFinal())
return get(CheckType, NewExpr)->Fold(R.getCurrentRecord(), R.isFinal());
- return const_cast<ExistsOpInit *>(this);
+ return this;
}
-Init *ExistsOpInit::getBit(unsigned Bit) const {
+const Init *ExistsOpInit::getBit(unsigned Bit) const {
return VarBitInit::get(const_cast<ExistsOpInit *>(this), Bit);
}
@@ -2161,7 +2172,7 @@ std::string ExistsOpInit::getAsString() const {
.str();
}
-const RecTy *TypedInit::getFieldType(StringInit *FieldName) const {
+const RecTy *TypedInit::getFieldType(const StringInit *FieldName) const {
if (const RecordRecTy *RecordType = dyn_cast<RecordRecTy>(getType())) {
for (const Record *Rec : RecordType->getClasses()) {
if (const RecordVal *Field = Rec->getValue(FieldName))
@@ -2171,7 +2182,7 @@ const RecTy *TypedInit::getFieldType(StringInit *FieldName) const {
return nullptr;
}
-Init *TypedInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *TypedInit::convertInitializerTo(const RecTy *Ty) const {
if (getType() == Ty || getType()->typeIsA(Ty))
return const_cast<TypedInit *>(this);
@@ -2182,12 +2193,13 @@ Init *TypedInit::convertInitializerTo(const RecTy *Ty) const {
return nullptr;
}
-Init *TypedInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
+const Init *
+TypedInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
const BitsRecTy *T = dyn_cast<BitsRecTy>(getType());
if (!T) return nullptr; // Cannot subscript a non-bits variable.
unsigned NumBits = T->getNumBits();
- SmallVector<Init *, 16> NewBits;
+ SmallVector<const Init *, 16> NewBits;
NewBits.reserve(Bits.size());
for (unsigned Bit : Bits) {
if (Bit >= NumBits)
@@ -2198,12 +2210,12 @@ Init *TypedInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
return BitsInit::get(getRecordKeeper(), NewBits);
}
-Init *TypedInit::getCastTo(const RecTy *Ty) const {
+const Init *TypedInit::getCastTo(const RecTy *Ty) const {
// Handle the common case quickly
if (getType() == Ty || getType()->typeIsA(Ty))
return const_cast<TypedInit *>(this);
- if (Init *Converted = convertInitializerTo(Ty)) {
+ if (const Init *Converted = convertInitializerTo(Ty)) {
assert(!isa<TypedInit>(Converted) ||
cast<TypedInit>(Converted)->getType()->typeIsA(Ty));
return Converted;
@@ -2216,12 +2228,12 @@ Init *TypedInit::getCastTo(const RecTy *Ty) const {
->Fold(nullptr);
}
-VarInit *VarInit::get(StringRef VN, const RecTy *T) {
- Init *Value = StringInit::get(T->getRecordKeeper(), VN);
+const VarInit *VarInit::get(StringRef VN, const RecTy *T) {
+ const Init *Value = StringInit::get(T->getRecordKeeper(), VN);
return VarInit::get(Value, T);
}
-VarInit *VarInit::get(Init *VN, const RecTy *T) {
+const VarInit *VarInit::get(const Init *VN, const RecTy *T) {
detail::RecordKeeperImpl &RK = T->getRecordKeeper().getImpl();
VarInit *&I = RK.TheVarInitPool[std::make_pair(T, VN)];
if (!I)
@@ -2230,23 +2242,23 @@ VarInit *VarInit::get(Init *VN, const RecTy *T) {
}
StringRef VarInit::getName() const {
- StringInit *NameString = cast<StringInit>(getNameInit());
+ const StringInit *NameString = cast<StringInit>(getNameInit());
return NameString->getValue();
}
-Init *VarInit::getBit(unsigned Bit) const {
+const Init *VarInit::getBit(unsigned Bit) const {
if (getType() == BitRecTy::get(getRecordKeeper()))
return const_cast<VarInit*>(this);
return VarBitInit::get(const_cast<VarInit*>(this), Bit);
}
-Init *VarInit::resolveReferences(Resolver &R) const {
- if (Init *Val = R.resolve(VarName))
+const Init *VarInit::resolveReferences(Resolver &R) const {
+ if (const Init *Val = R.resolve(VarName))
return Val;
- return const_cast<VarInit *>(this);
+ return this;
}
-VarBitInit *VarBitInit::get(TypedInit *T, unsigned B) {
+const VarBitInit *VarBitInit::get(const TypedInit *T, unsigned B) {
detail::RecordKeeperImpl &RK = T->getRecordKeeper().getImpl();
VarBitInit *&I = RK.TheVarBitInitPool[std::make_pair(T, B)];
if (!I)
@@ -2258,25 +2270,25 @@ std::string VarBitInit::getAsString() const {
return TI->getAsString() + "{" + utostr(Bit) + "}";
}
-Init *VarBitInit::resolveReferences(Resolver &R) const {
- Init *I = TI->resolveReferences(R);
+const Init *VarBitInit::resolveReferences(Resolver &R) const {
+ const Init *I = TI->resolveReferences(R);
if (TI != I)
return I->getBit(getBitNum());
- return const_cast<VarBitInit*>(this);
+ return this;
}
DefInit::DefInit(const Record *D)
: TypedInit(IK_DefInit, D->getType()), Def(D) {}
-Init *DefInit::convertInitializerTo(const RecTy *Ty) const {
+const Init *DefInit::convertInitializerTo(const RecTy *Ty) const {
if (auto *RRT = dyn_cast<RecordRecTy>(Ty))
if (getType()->typeIsConvertibleTo(RRT))
return const_cast<DefInit *>(this);
return nullptr;
}
-const RecTy *DefInit::getFieldType(StringInit *FieldName) const {
+const RecTy *DefInit::getFieldType(const StringInit *FieldName) const {
if (const RecordVal *RV = Def->getValue(FieldName))
return RV->getType();
return nullptr;
@@ -2285,11 +2297,11 @@ const RecTy *DefInit::getFieldType(StringInit *FieldName) const {
std::string DefInit::getAsString() const { return std::string(Def->getName()); }
static void ProfileVarDefInit(FoldingSetNodeID &ID, Record *Class,
- ArrayRef<ArgumentInit *> Args) {
+ ArrayRef<const ArgumentInit *> Args) {
ID.AddInteger(Args.size());
ID.AddPointer(Class);
- for (Init *I : Args)
+ for (const Init *I : Args)
ID.AddPointer(I);
}
@@ -2297,21 +2309,21 @@ VarDefInit::VarDefInit(SMLoc Loc, Record *Class, unsigned N)
: TypedInit(IK_VarDefInit, RecordRecTy::get(Class)), Loc(Loc), Class(Class),
NumArgs(N) {}
-VarDefInit *VarDefInit::get(SMLoc Loc, Record *Class,
- ArrayRef<ArgumentInit *> Args) {
+const VarDefInit *VarDefInit::get(SMLoc Loc, Record *Class,
+ ArrayRef<const ArgumentInit *> Args) {
FoldingSetNodeID ID;
ProfileVarDefInit(ID, Class, Args);
detail::RecordKeeperImpl &RK = Class->getRecords().getImpl();
void *IP = nullptr;
- if (VarDefInit *I = RK.TheVarDefInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const VarDefInit *I = RK.TheVarDefInitPool.FindNodeOrInsertPos(ID, IP))
return I;
void *Mem = RK.Allocator.Allocate(
- totalSizeToAlloc<ArgumentInit *>(Args.size()), alignof(VarDefInit));
+ totalSizeToAlloc<const ArgumentInit *>(Args.size()), alignof(VarDefInit));
VarDefInit *I = new (Mem) VarDefInit(Loc, Class, Args.size());
std::uninitialized_copy(Args.begin(), Args.end(),
- I->getTrailingObjects<ArgumentInit *>());
+ I->getTrailingObjects<const ArgumentInit *>());
RK.TheVarDefInitPool.InsertNode(I, IP);
return I;
}
@@ -2320,7 +2332,7 @@ void VarDefInit::Profile(FoldingSetNodeID &ID) const {
ProfileVarDefInit(ID, Class, args());
}
-DefInit *VarDefInit::instantiate() {
+const DefInit *VarDefInit::instantiate() {
if (Def)
return Def;
@@ -2340,10 +2352,10 @@ DefInit *VarDefInit::instantiate() {
NewRec->appendDumps(Class);
// Substitute and resolve template arguments
- ArrayRef<Init *> TArgs = Class->getTemplateArgs();
+ ArrayRef<const Init *> TArgs = Class->getTemplateArgs();
MapResolver R(NewRec);
- for (Init *Arg : TArgs) {
+ for (const Init *Arg : TArgs) {
R.set(Arg, NewRec->getValue(Arg)->getValue());
NewRec->removeValue(Arg);
}
@@ -2377,13 +2389,13 @@ DefInit *VarDefInit::instantiate() {
return Def = NewRec->getDefInit();
}
-Init *VarDefInit::resolveReferences(Resolver &R) const {
+const Init *VarDefInit::resolveReferences(Resolver &R) const {
TrackUnresolvedResolver UR(&R);
bool Changed = false;
- SmallVector<ArgumentInit *, 8> NewArgs;
+ SmallVector<const ArgumentInit *, 8> NewArgs;
NewArgs.reserve(args_size());
- for (ArgumentInit *Arg : args()) {
+ for (const ArgumentInit *Arg : args()) {
auto *NewArg = cast<ArgumentInit>(Arg->resolveReferences(UR));
NewArgs.push_back(NewArg);
Changed |= NewArg != Arg;
@@ -2392,29 +2404,29 @@ Init *VarDefInit::resolveReferences(Resolver &R) const {
if (Changed) {
auto *New = VarDefInit::get(Loc, Class, NewArgs);
if (!UR.foundUnresolved())
- return New->instantiate();
+ return const_cast<VarDefInit *>(New)->instantiate();
return New;
}
- return const_cast<VarDefInit *>(this);
+ return this;
}
-Init *VarDefInit::Fold() const {
+const Init *VarDefInit::Fold() const {
if (Def)
return Def;
TrackUnresolvedResolver R;
- for (Init *Arg : args())
+ for (const Init *Arg : args())
Arg->resolveReferences(R);
if (!R.foundUnresolved())
return const_cast<VarDefInit *>(this)->instantiate();
- return const_cast<VarDefInit *>(this);
+ return this;
}
std::string VarDefInit::getAsString() const {
std::string Result = Class->getNameInitAsString() + "<";
const char *sep = "";
- for (Init *Arg : args()) {
+ for (const Init *Arg : args()) {
Result += sep;
sep = ", ";
Result += Arg->getAsString();
@@ -2422,7 +2434,7 @@ std::string VarDefInit::getAsString() const {
return Result + ">";
}
-FieldInit *FieldInit::get(Init *R, StringInit *FN) {
+const FieldInit *FieldInit::get(const Init *R, const StringInit *FN) {
detail::RecordKeeperImpl &RK = R->getRecordKeeper().getImpl();
FieldInit *&I = RK.TheFieldInitPool[std::make_pair(R, FN)];
if (!I)
@@ -2430,28 +2442,28 @@ FieldInit *FieldInit::get(Init *R, StringInit *FN) {
return I;
}
-Init *FieldInit::getBit(unsigned Bit) const {
+const Init *FieldInit::getBit(unsigned Bit) const {
if (getType() == BitRecTy::get(getRecordKeeper()))
return const_cast<FieldInit*>(this);
return VarBitInit::get(const_cast<FieldInit*>(this), Bit);
}
-Init *FieldInit::resolveReferences(Resolver &R) const {
- Init *NewRec = Rec->resolveReferences(R);
+const Init *FieldInit::resolveReferences(Resolver &R) const {
+ const Init *NewRec = Rec->resolveReferences(R);
if (NewRec != Rec)
return FieldInit::get(NewRec, FieldName)->Fold(R.getCurrentRecord());
- return const_cast<FieldInit *>(this);
+ return this;
}
-Init *FieldInit::Fold(Record *CurRec) const {
- if (DefInit *DI = dyn_cast<DefInit>(Rec)) {
+const Init *FieldInit::Fold(const Record *CurRec) const {
+ if (const DefInit *DI = dyn_cast<DefInit>(Rec)) {
const Record *Def = DI->getDef();
if (Def == CurRec)
PrintFatalError(CurRec->getLoc(),
Twine("Attempting to access field '") +
FieldName->getAsUnquotedString() + "' of '" +
Rec->getAsString() + "' is a forbidden self-reference");
- Init *FieldVal = Def->getValue(FieldName)->getValue();
+ const Init *FieldVal = Def->getValue(FieldName)->getValue();
if (FieldVal->isConcrete())
return FieldVal;
}
@@ -2459,22 +2471,22 @@ Init *FieldInit::Fold(Record *CurRec) const {
}
bool FieldInit::isConcrete() const {
- if (DefInit *DI = dyn_cast<DefInit>(Rec)) {
- Init *FieldVal = DI->getDef()->getValue(FieldName)->getValue();
+ if (const DefInit *DI = dyn_cast<DefInit>(Rec)) {
+ const Init *FieldVal = DI->getDef()->getValue(FieldName)->getValue();
return FieldVal->isConcrete();
}
return false;
}
static void ProfileCondOpInit(FoldingSetNodeID &ID,
- ArrayRef<Init *> CondRange,
- ArrayRef<Init *> ValRange,
- const RecTy *ValType) {
+ ArrayRef<const Init *> CondRange,
+ ArrayRef<const Init *> ValRange,
+ const RecTy *ValType) {
assert(CondRange.size() == ValRange.size() &&
"Number of conditions and values must match!");
ID.AddPointer(ValType);
- ArrayRef<Init *>::iterator Case = CondRange.begin();
- ArrayRef<Init *>::iterator Val = ValRange.begin();
+ ArrayRef<const Init *>::iterator Case = CondRange.begin();
+ ArrayRef<const Init *>::iterator Val = ValRange.begin();
while (Case != CondRange.end()) {
ID.AddPointer(*Case++);
@@ -2483,13 +2495,15 @@ static void ProfileCondOpInit(FoldingSetNodeID &ID,
}
void CondOpInit::Profile(FoldingSetNodeID &ID) const {
- ProfileCondOpInit(ID, ArrayRef(getTrailingObjects<Init *>(), NumConds),
- ArrayRef(getTrailingObjects<Init *>() + NumConds, NumConds),
- ValType);
+ ProfileCondOpInit(
+ ID, ArrayRef(getTrailingObjects<const Init *>(), NumConds),
+ ArrayRef(getTrailingObjects<const Init *>() + NumConds, NumConds),
+ ValType);
}
-CondOpInit *CondOpInit::get(ArrayRef<Init *> CondRange,
- ArrayRef<Init *> ValRange, const RecTy *Ty) {
+const CondOpInit *CondOpInit::get(ArrayRef<const Init *> CondRange,
+ ArrayRef<const Init *> ValRange,
+ const RecTy *Ty) {
assert(CondRange.size() == ValRange.size() &&
"Number of conditions and values must match!");
@@ -2498,33 +2512,34 @@ CondOpInit *CondOpInit::get(ArrayRef<Init *> CondRange,
detail::RecordKeeperImpl &RK = Ty->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (CondOpInit *I = RK.TheCondOpInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const CondOpInit *I = RK.TheCondOpInitPool.FindNodeOrInsertPos(ID, IP))
return I;
void *Mem = RK.Allocator.Allocate(
- totalSizeToAlloc<Init *>(2 * CondRange.size()), alignof(BitsInit));
+ totalSizeToAlloc<const Init *>(2 * CondRange.size()), alignof(BitsInit));
CondOpInit *I = new(Mem) CondOpInit(CondRange.size(), Ty);
std::uninitialized_copy(CondRange.begin(), CondRange.end(),
- I->getTrailingObjects<Init *>());
+ I->getTrailingObjects<const Init *>());
std::uninitialized_copy(ValRange.begin(), ValRange.end(),
- I->getTrailingObjects<Init *>()+CondRange.size());
+ I->getTrailingObjects<const Init *>() +
+ CondRange.size());
RK.TheCondOpInitPool.InsertNode(I, IP);
return I;
}
-Init *CondOpInit::resolveReferences(Resolver &R) const {
- SmallVector<Init*, 4> NewConds;
+const Init *CondOpInit::resolveReferences(Resolver &R) const {
+ SmallVector<const Init *, 4> NewConds;
bool Changed = false;
for (const Init *Case : getConds()) {
- Init *NewCase = Case->resolveReferences(R);
+ const Init *NewCase = Case->resolveReferences(R);
NewConds.push_back(NewCase);
Changed |= NewCase != Case;
}
- SmallVector<Init*, 4> NewVals;
+ SmallVector<const Init *, 4> NewVals;
for (const Init *Val : getVals()) {
- Init *NewVal = Val->resolveReferences(R);
+ const Init *NewVal = Val->resolveReferences(R);
NewVals.push_back(NewVal);
Changed |= NewVal != Val;
}
@@ -2533,16 +2548,16 @@ Init *CondOpInit::resolveReferences(Resolver &R) const {
return (CondOpInit::get(NewConds, NewVals,
getValType()))->Fold(R.getCurrentRecord());
- return const_cast<CondOpInit *>(this);
+ return this;
}
-Init *CondOpInit::Fold(Record *CurRec) const {
+const Init *CondOpInit::Fold(const Record *CurRec) const {
RecordKeeper &RK = getRecordKeeper();
- for ( unsigned i = 0; i < NumConds; ++i) {
- Init *Cond = getCond(i);
- Init *Val = getVal(i);
+ for (unsigned i = 0; i < NumConds; ++i) {
+ const Init *Cond = getCond(i);
+ const Init *Val = getVal(i);
- if (IntInit *CondI = dyn_cast_or_null<IntInit>(
+ if (const IntInit *CondI = dyn_cast_or_null<IntInit>(
Cond->convertInitializerTo(IntRecTy::get(RK)))) {
if (CondI->getValue())
return Val->convertInitializerTo(getValType());
@@ -2593,18 +2608,19 @@ std::string CondOpInit::getAsString() const {
return Result + ")";
}
-Init *CondOpInit::getBit(unsigned Bit) const {
+const Init *CondOpInit::getBit(unsigned Bit) const {
return VarBitInit::get(const_cast<CondOpInit *>(this), Bit);
}
-static void ProfileDagInit(FoldingSetNodeID &ID, Init *V, StringInit *VN,
- ArrayRef<Init *> ArgRange,
- ArrayRef<StringInit *> NameRange) {
+static void ProfileDagInit(FoldingSetNodeID &ID, const Init *V,
+ const StringInit *VN,
+ ArrayRef<const Init *> ArgRange,
+ ArrayRef<const StringInit *> NameRange) {
ID.AddPointer(V);
ID.AddPointer(VN);
- ArrayRef<Init *>::iterator Arg = ArgRange.begin();
- ArrayRef<StringInit *>::iterator Name = NameRange.begin();
+ ArrayRef<const Init *>::iterator Arg = ArgRange.begin();
+ ArrayRef<const StringInit *>::iterator Name = NameRange.begin();
while (Arg != ArgRange.end()) {
assert(Name != NameRange.end() && "Arg name underflow!");
ID.AddPointer(*Arg++);
@@ -2613,34 +2629,36 @@ static void ProfileDagInit(FoldingSetNodeID &ID, Init *V, StringInit *VN,
assert(Name == NameRange.end() && "Arg name overflow!");
}
-DagInit *DagInit::get(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
- ArrayRef<StringInit *> NameRange) {
+const DagInit *DagInit::get(const Init *V, const StringInit *VN,
+ ArrayRef<const Init *> ArgRange,
+ ArrayRef<const StringInit *> NameRange) {
assert(ArgRange.size() == NameRange.size());
FoldingSetNodeID ID;
ProfileDagInit(ID, V, VN, ArgRange, NameRange);
detail::RecordKeeperImpl &RK = V->getRecordKeeper().getImpl();
void *IP = nullptr;
- if (DagInit *I = RK.TheDagInitPool.FindNodeOrInsertPos(ID, IP))
+ if (const DagInit *I = RK.TheDagInitPool.FindNodeOrInsertPos(ID, IP))
return I;
- void *Mem = RK.Allocator.Allocate(
- totalSizeToAlloc<Init *, StringInit *>(ArgRange.size(), NameRange.size()),
- alignof(BitsInit));
+ void *Mem =
+ RK.Allocator.Allocate(totalSizeToAlloc<const Init *, const StringInit *>(
+ ArgRange.size(), NameRange.size()),
+ alignof(BitsInit));
DagInit *I = new (Mem) DagInit(V, VN, ArgRange.size(), NameRange.size());
std::uninitialized_copy(ArgRange.begin(), ArgRange.end(),
- I->getTrailingObjects<Init *>());
+ I->getTrailingObjects<const Init *>());
std::uninitialized_copy(NameRange.begin(), NameRange.end(),
- I->getTrailingObjects<StringInit *>());
+ I->getTrailingObjects<const StringInit *>());
RK.TheDagInitPool.InsertNode(I, IP);
return I;
}
-DagInit *
-DagInit::get(Init *V, StringInit *VN,
- ArrayRef<std::pair<Init*, StringInit*>> args) {
- SmallVector<Init *, 8> Args;
- SmallVector<StringInit *, 8> Names;
+const DagInit *
+DagInit::get(const Init *V, const StringInit *VN,
+ ArrayRef<std::pair<const Init *, const StringInit *>> args) {
+ SmallVector<const Init *, 8> Args;
+ SmallVector<const StringInit *, 8> Names;
for (const auto &Arg : args) {
Args.push_back(Arg.first);
@@ -2651,13 +2669,13 @@ DagInit::get(Init *V, StringInit *VN,
}
void DagInit::Profile(FoldingSetNodeID &ID) const {
- ProfileDagInit(ID, Val, ValName,
- ArrayRef(getTrailingObjects<Init *>(), NumArgs),
- ArrayRef(getTrailingObjects<StringInit *>(), NumArgNames));
+ ProfileDagInit(
+ ID, Val, ValName, ArrayRef(getTrailingObjects<const Init *>(), NumArgs),
+ ArrayRef(getTrailingObjects<const StringInit *>(), NumArgNames));
}
const Record *DagInit::getOperatorAsDef(ArrayRef<SMLoc> Loc) const {
- if (DefInit *DefI = dyn_cast<DefInit>(Val))
+ if (const DefInit *DefI = dyn_cast<DefInit>(Val))
return DefI->getDef();
PrintFatalError(Loc, "Expected record as operator");
return nullptr;
@@ -2665,28 +2683,28 @@ const Record *DagInit::getOperatorAsDef(ArrayRef<SMLoc> Loc) const {
std::optional<unsigned> DagInit::getArgNo(StringRef Name) const {
for (unsigned i = 0, e = getNumArgs(); i < e; ++i) {
- StringInit *ArgName = getArgName(i);
+ const StringInit *ArgName = getArgName(i);
if (ArgName && ArgName->getValue() == Name)
return i;
}
return std::nullopt;
}
-Init *DagInit::resolveReferences(Resolver &R) const {
- SmallVector<Init*, 8> NewArgs;
+const Init *DagInit::resolveReferences(Resolver &R) const {
+ SmallVector<const Init *, 8> NewArgs;
NewArgs.reserve(arg_size());
bool ArgsChanged = false;
for (const Init *Arg : getArgs()) {
- Init *NewArg = Arg->resolveReferences(R);
+ const Init *NewArg = Arg->resolveReferences(R);
NewArgs.push_back(NewArg);
ArgsChanged |= NewArg != Arg;
}
- Init *Op = Val->resolveReferences(R);
+ const Init *Op = Val->resolveReferences(R);
if (Op != Val || ArgsChanged)
return DagInit::get(Op, ValName, NewArgs, getArgNames());
- return const_cast<DagInit *>(this);
+ return this;
}
bool DagInit::isConcrete() const {
@@ -2718,7 +2736,7 @@ std::string DagInit::getAsString() const {
// Other implementations
//===----------------------------------------------------------------------===//
-RecordVal::RecordVal(Init *N, const RecTy *T, FieldKind K)
+RecordVal::RecordVal(const Init *N, const RecTy *T, FieldKind K)
: Name(N), TyAndKind(T, K) {
setValue(UnsetInit::get(N->getRecordKeeper()));
assert(Value && "Cannot create unset value for current type!");
@@ -2726,7 +2744,7 @@ RecordVal::RecordVal(Init *N, const RecTy *T, FieldKind K)
// This constructor accepts the same arguments as the above, but also
// a source location.
-RecordVal::RecordVal(Init *N, SMLoc Loc, const RecTy *T, FieldKind K)
+RecordVal::RecordVal(const Init *N, SMLoc Loc, const RecTy *T, FieldKind K)
: Name(N), Loc(Loc), TyAndKind(T, K) {
setValue(UnsetInit::get(N->getRecordKeeper()));
assert(Value && "Cannot create unset value for current type!");
@@ -2751,7 +2769,7 @@ std::string RecordVal::getPrintType() const {
}
}
-bool RecordVal::setValue(Init *V) {
+bool RecordVal::setValue(const Init *V) {
if (V) {
Value = V->getCastTo(getType());
if (Value) {
@@ -2759,7 +2777,7 @@ bool RecordVal::setValue(Init *V) {
cast<TypedInit>(Value)->getType()->typeIsA(getType()));
if (const BitsRecTy *BTy = dyn_cast<BitsRecTy>(getType())) {
if (!isa<BitsInit>(Value)) {
- SmallVector<Init *, 64> Bits;
+ SmallVector<const Init *, 64> Bits;
Bits.reserve(BTy->getNumBits());
for (unsigned I = 0, E = BTy->getNumBits(); I < E; ++I)
Bits.push_back(Value->getBit(I));
@@ -2775,7 +2793,7 @@ bool RecordVal::setValue(Init *V) {
// This version of setValue takes a source location and resets the
// location in the RecordVal.
-bool RecordVal::setValue(Init *V, SMLoc NewLoc) {
+bool RecordVal::setValue(const Init *V, SMLoc NewLoc) {
Loc = NewLoc;
if (V) {
Value = V->getCastTo(getType());
@@ -2784,7 +2802,7 @@ bool RecordVal::setValue(Init *V, SMLoc NewLoc) {
cast<TypedInit>(Value)->getType()->typeIsA(getType()));
if (const BitsRecTy *BTy = dyn_cast<BitsRecTy>(getType())) {
if (!isa<BitsInit>(Value)) {
- SmallVector<Init *, 64> Bits;
+ SmallVector<const Init *, 64> Bits;
Bits.reserve(BTy->getNumBits());
for (unsigned I = 0, E = BTy->getNumBits(); I < E; ++I)
Bits.push_back(Value->getBit(I));
@@ -2847,7 +2865,7 @@ unsigned Record::getNewUID(RecordKeeper &RK) {
return RK.getImpl().LastRecordID++;
}
-void Record::setName(Init *NewName) {
+void Record::setName(const Init *NewName) {
Name = NewName;
checkName();
// DO NOT resolve record values to the name at this point because
@@ -2893,8 +2911,8 @@ void Record::getDirectSuperClasses(
}
void Record::resolveReferences(Resolver &R, const RecordVal *SkipVal) {
- Init *OldName = getNameInit();
- Init *NewName = Name->resolveReferences(R);
+ const Init *OldName = getNameInit();
+ const Init *NewName = Name->resolveReferences(R);
if (NewName != OldName) {
// Re-register with RecordKeeper.
setName(NewName);
@@ -2904,11 +2922,11 @@ void Record::resolveReferences(Resolver &R, const RecordVal *SkipVal) {
for (RecordVal &Value : Values) {
if (SkipVal == &Value) // Skip resolve the same field as the given one
continue;
- if (Init *V = Value.getValue()) {
- Init *VR = V->resolveReferences(R);
+ if (const Init *V = Value.getValue()) {
+ const Init *VR = V->resolveReferences(R);
if (Value.setValue(VR)) {
std::string Type;
- if (TypedInit *VRT = dyn_cast<TypedInit>(VR))
+ if (const TypedInit *VRT = dyn_cast<TypedInit>(VR))
Type =
(Twine("of type '") + VRT->getType()->getAsString() + "' ").str();
PrintFatalError(
@@ -2924,19 +2942,19 @@ void Record::resolveReferences(Resolver &R, const RecordVal *SkipVal) {
// Resolve the assertion expressions.
for (auto &Assertion : Assertions) {
- Init *Value = Assertion.Condition->resolveReferences(R);
+ const Init *Value = Assertion.Condition->resolveReferences(R);
Assertion.Condition = Value;
Value = Assertion.Message->resolveReferences(R);
Assertion.Message = Value;
}
// Resolve the dump expressions.
for (auto &Dump : Dumps) {
- Init *Value = Dump.Message->resolveReferences(R);
+ const Init *Value = Dump.Message->resolveReferences(R);
Dump.Message = Value;
}
}
-void Record::resolveReferences(Init *NewName) {
+void Record::resolveReferences(const Init *NewName) {
RecordResolver R(*this);
R.setName(NewName);
R.setFinal(true);
@@ -2950,7 +2968,7 @@ LLVM_DUMP_METHOD void Record::dump() const { errs() << *this; }
raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
OS << R.getNameInitAsString();
- ArrayRef<Init *> TArgs = R.getTemplateArgs();
+ ArrayRef<const Init *> TArgs = R.getTemplateArgs();
if (!TArgs.empty()) {
OS << "<";
bool NeedComma = false;
@@ -2991,7 +3009,7 @@ SMLoc Record::getFieldLoc(StringRef FieldName) const {
return R->getLoc();
}
-Init *Record::getValueInit(StringRef FieldName) const {
+const Init *Record::getValueInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
@@ -3015,7 +3033,7 @@ Record::getValueAsOptionalString(StringRef FieldName) const {
if (isa<UnsetInit>(R->getValue()))
return std::nullopt;
- if (StringInit *SI = dyn_cast<StringInit>(R->getValue()))
+ if (const StringInit *SI = dyn_cast<StringInit>(R->getValue()))
return SI->getValue();
PrintFatalError(getLoc(),
@@ -3023,25 +3041,25 @@ Record::getValueAsOptionalString(StringRef FieldName) const {
"' exists but does not have a string initializer!");
}
-BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const {
+const BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (BitsInit *BI = dyn_cast<BitsInit>(R->getValue()))
+ if (const BitsInit *BI = dyn_cast<BitsInit>(R->getValue()))
return BI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + FieldName +
"' exists but does not have a bits value");
}
-ListInit *Record::getValueAsListInit(StringRef FieldName) const {
+const ListInit *Record::getValueAsListInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (ListInit *LI = dyn_cast<ListInit>(R->getValue()))
+ if (const ListInit *LI = dyn_cast<ListInit>(R->getValue()))
return LI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + FieldName +
"' exists but does not have a list value");
@@ -3049,7 +3067,7 @@ ListInit *Record::getValueAsListInit(StringRef FieldName) const {
std::vector<const Record *>
Record::getValueAsListOfDefs(StringRef FieldName) const {
- ListInit *List = getValueAsListInit(FieldName);
+ const ListInit *List = getValueAsListInit(FieldName);
std::vector<const Record *> Defs;
for (const Init *I : List->getValues()) {
if (const DefInit *DI = dyn_cast<DefInit>(I))
@@ -3068,7 +3086,7 @@ int64_t Record::getValueAsInt(StringRef FieldName) const {
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (IntInit *II = dyn_cast<IntInit>(R->getValue()))
+ if (const IntInit *II = dyn_cast<IntInit>(R->getValue()))
return II->getValue();
PrintFatalError(getLoc(), Twine("Record `") + getName() + "', field `" +
FieldName +
@@ -3078,10 +3096,10 @@ int64_t Record::getValueAsInt(StringRef FieldName) const {
std::vector<int64_t>
Record::getValueAsListOfInts(StringRef FieldName) const {
- ListInit *List = getValueAsListInit(FieldName);
+ const ListInit *List = getValueAsListInit(FieldName);
std::vector<int64_t> Ints;
- for (Init *I : List->getValues()) {
- if (IntInit *II = dyn_cast<IntInit>(I))
+ for (const Init *I : List->getValues()) {
+ if (const IntInit *II = dyn_cast<IntInit>(I))
Ints.push_back(II->getValue());
else
PrintFatalError(getLoc(),
@@ -3094,10 +3112,10 @@ Record::getValueAsListOfInts(StringRef FieldName) const {
std::vector<StringRef>
Record::getValueAsListOfStrings(StringRef FieldName) const {
- ListInit *List = getValueAsListInit(FieldName);
+ const ListInit *List = getValueAsListInit(FieldName);
std::vector<StringRef> Strings;
- for (Init *I : List->getValues()) {
- if (StringInit *SI = dyn_cast<StringInit>(I))
+ for (const Init *I : List->getValues()) {
+ if (const StringInit *SI = dyn_cast<StringInit>(I))
Strings.push_back(SI->getValue());
else
PrintFatalError(getLoc(),
@@ -3114,7 +3132,7 @@ const Record *Record::getValueAsDef(StringRef FieldName) const {
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (DefInit *DI = dyn_cast<DefInit>(R->getValue()))
+ if (const DefInit *DI = dyn_cast<DefInit>(R->getValue()))
return DI->getDef();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
FieldName + "' does not have a def initializer!");
@@ -3126,7 +3144,7 @@ const Record *Record::getValueAsOptionalDef(StringRef FieldName) const {
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (DefInit *DI = dyn_cast<DefInit>(R->getValue()))
+ if (const DefInit *DI = dyn_cast<DefInit>(R->getValue()))
return DI->getDef();
if (isa<UnsetInit>(R->getValue()))
return nullptr;
@@ -3140,7 +3158,7 @@ bool Record::getValueAsBit(StringRef FieldName) const {
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
+ if (const BitInit *BI = dyn_cast<BitInit>(R->getValue()))
return BI->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
FieldName + "' does not have a bit initializer!");
@@ -3157,19 +3175,19 @@ bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const {
return false;
}
Unset = false;
- if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
+ if (const BitInit *BI = dyn_cast<BitInit>(R->getValue()))
return BI->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
FieldName + "' does not have a bit initializer!");
}
-DagInit *Record::getValueAsDag(StringRef FieldName) const {
+const DagInit *Record::getValueAsDag(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- if (DagInit *DI = dyn_cast<DagInit>(R->getValue()))
+ if (const DagInit *DI = dyn_cast<DagInit>(R->getValue()))
return DI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
FieldName + "' does not have a dag initializer!");
@@ -3185,8 +3203,8 @@ void Record::checkRecordAssertions() {
bool AnyFailed = false;
for (const auto &Assertion : getAssertions()) {
- Init *Condition = Assertion.Condition->resolveReferences(R);
- Init *Message = Assertion.Message->resolveReferences(R);
+ const Init *Condition = Assertion.Condition->resolveReferences(R);
+ const Init *Message = Assertion.Message->resolveReferences(R);
AnyFailed |= CheckAssert(Assertion.Loc, Condition, Message);
}
@@ -3203,7 +3221,7 @@ void Record::emitRecordDumps() {
R.setFinal(true);
for (const auto &Dump : getDumps()) {
- Init *Message = Dump.Message->resolveReferences(R);
+ const Init *Message = Dump.Message->resolveReferences(R);
dumpMessage(Dump.Loc, Message);
}
}
@@ -3241,7 +3259,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const RecordKeeper &RK) {
/// GetNewAnonymousName - Generate a unique anonymous name that can be used as
/// an identifier.
-Init *RecordKeeper::getNewAnonymousName() {
+const Init *RecordKeeper::getNewAnonymousName() {
return AnonymousNameInit::get(*this, getImpl().AnonCounter++);
}
@@ -3289,12 +3307,12 @@ void RecordKeeper::dumpAllocationStats(raw_ostream &OS) const {
Impl->dumpAllocationStats(OS);
}
-Init *MapResolver::resolve(Init *VarName) {
+const Init *MapResolver::resolve(const Init *VarName) {
auto It = Map.find(VarName);
if (It == Map.end())
return nullptr;
- Init *I = It->second.V;
+ const Init *I = It->second.V;
if (!It->second.Resolved && Map.size() > 1) {
// Resolve mutual references among the mapped variables, but prevent
@@ -3307,15 +3325,15 @@ Init *MapResolver::resolve(Init *VarName) {
return I;
}
-Init *RecordResolver::resolve(Init *VarName) {
- Init *Val = Cache.lookup(VarName);
+const Init *RecordResolver::resolve(const Init *VarName) {
+ const Init *Val = Cache.lookup(VarName);
if (Val)
return Val;
if (llvm::is_contained(Stack, VarName))
return nullptr; // prevent infinite recursion
- if (RecordVal *RV = getCurrentRecord()->getValue(VarName)) {
+ if (const RecordVal *RV = getCurrentRecord()->getValue(VarName)) {
if (!isa<UnsetInit>(RV->getValue())) {
Val = RV->getValue();
Stack.push_back(VarName);
@@ -3332,8 +3350,8 @@ Init *RecordResolver::resolve(Init *VarName) {
return Val;
}
-Init *TrackUnresolvedResolver::resolve(Init *VarName) {
- Init *I = nullptr;
+const Init *TrackUnresolvedResolver::resolve(const Init *VarName) {
+ const Init *I = nullptr;
if (R) {
I = R->resolve(VarName);
@@ -3352,8 +3370,7 @@ Init *TrackUnresolvedResolver::resolve(Init *VarName) {
return I;
}
-Init *HasReferenceResolver::resolve(Init *VarName)
-{
+const Init *HasReferenceResolver::resolve(const Init *VarName) {
if (VarName == VarNameToTrack)
Found = true;
return nullptr;
diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h
index 4fa4d84..9a6874c8 100644
--- a/llvm/lib/TableGen/TGLexer.h
+++ b/llvm/lib/TableGen/TGLexer.h
@@ -80,7 +80,6 @@ enum TokKind {
Code,
Dag,
ElseKW,
- FalseKW,
Field,
In,
Include,
@@ -88,7 +87,6 @@ enum TokKind {
List,
String,
Then,
- TrueKW,
// Object start tokens.
OBJECT_START_FIRST,
diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp
index aed4f3f..97a7e68 100644
--- a/llvm/lib/TableGen/TGParser.cpp
+++ b/llvm/lib/TableGen/TGParser.cpp
@@ -35,7 +35,7 @@ namespace llvm {
struct SubClassReference {
SMRange RefRange;
Record *Rec = nullptr;
- SmallVector<ArgumentInit *, 4> TemplateArgs;
+ SmallVector<const ArgumentInit *, 4> TemplateArgs;
SubClassReference() = default;
@@ -45,7 +45,7 @@ struct SubClassReference {
struct SubMultiClassReference {
SMRange RefRange;
MultiClass *MC = nullptr;
- SmallVector<ArgumentInit *, 4> TemplateArgs;
+ SmallVector<const ArgumentInit *, 4> TemplateArgs;
SubMultiClassReference() = default;
@@ -60,7 +60,7 @@ LLVM_DUMP_METHOD void SubMultiClassReference::dump() const {
MC->dump();
errs() << "Template args:\n";
- for (Init *TA : TemplateArgs)
+ for (const Init *TA : TemplateArgs)
TA->dump();
}
#endif
@@ -68,9 +68,9 @@ LLVM_DUMP_METHOD void SubMultiClassReference::dump() const {
} // end namespace llvm
static bool checkBitsConcrete(Record &R, const RecordVal &RV) {
- BitsInit *BV = cast<BitsInit>(RV.getValue());
+ const BitsInit *BV = cast<BitsInit>(RV.getValue());
for (unsigned i = 0, e = BV->getNumBits(); i != e; ++i) {
- Init *Bit = BV->getBit(i);
+ const Init *Bit = BV->getBit(i);
bool IsReference = false;
if (auto VBI = dyn_cast<VarBitInit>(Bit)) {
if (auto VI = dyn_cast<VarInit>(VBI->getBitVar())) {
@@ -95,7 +95,7 @@ static void checkConcrete(Record &R) {
if (RV.isNonconcreteOK())
continue;
- if (Init *V = RV.getValue()) {
+ if (const Init *V = RV.getValue()) {
bool Ok = isa<BitsInit>(V) ? checkBitsConcrete(R, RV) : V->isConcrete();
if (!Ok) {
PrintError(R.getLoc(),
@@ -110,43 +110,45 @@ static void checkConcrete(Record &R) {
/// Return an Init with a qualifier prefix referring
/// to CurRec's name.
-static Init *QualifyName(Record &CurRec, Init *Name) {
+static const Init *QualifyName(Record &CurRec, const Init *Name) {
RecordKeeper &RK = CurRec.getRecords();
- Init *NewName = BinOpInit::getStrConcat(
+ const Init *NewName = BinOpInit::getStrConcat(
CurRec.getNameInit(),
StringInit::get(RK, CurRec.isMultiClass() ? "::" : ":"));
NewName = BinOpInit::getStrConcat(NewName, Name);
- if (BinOpInit *BinOp = dyn_cast<BinOpInit>(NewName))
+ if (const BinOpInit *BinOp = dyn_cast<BinOpInit>(NewName))
NewName = BinOp->Fold(&CurRec);
return NewName;
}
-static Init *QualifyName(MultiClass *MC, Init *Name) {
+static const Init *QualifyName(MultiClass *MC, const Init *Name) {
return QualifyName(MC->Rec, Name);
}
/// Return the qualified version of the implicit 'NAME' template argument.
-static Init *QualifiedNameOfImplicitName(Record &Rec) {
+static const Init *QualifiedNameOfImplicitName(Record &Rec) {
return QualifyName(Rec, StringInit::get(Rec.getRecords(), "NAME"));
}
-static Init *QualifiedNameOfImplicitName(MultiClass *MC) {
+static const Init *QualifiedNameOfImplicitName(MultiClass *MC) {
return QualifiedNameOfImplicitName(MC->Rec);
}
-Init *TGVarScope::getVar(RecordKeeper &Records, MultiClass *ParsingMultiClass,
- StringInit *Name, SMRange NameLoc,
- bool TrackReferenceLocs) const {
+const Init *TGVarScope::getVar(RecordKeeper &Records,
+ MultiClass *ParsingMultiClass,
+ const StringInit *Name, SMRange NameLoc,
+ bool TrackReferenceLocs) const {
// First, we search in local variables.
auto It = Vars.find(Name->getValue());
if (It != Vars.end())
return It->second;
- auto FindValueInArgs = [&](Record *Rec, StringInit *Name) -> Init * {
+ auto FindValueInArgs = [&](Record *Rec,
+ const StringInit *Name) -> const Init * {
if (!Rec)
return nullptr;
- Init *ArgName = QualifyName(*Rec, Name);
+ const Init *ArgName = QualifyName(*Rec, Name);
if (Rec->isTemplateArg(ArgName)) {
RecordVal *RV = Rec->getValue(ArgName);
assert(RV && "Template arg doesn't exist??");
@@ -184,7 +186,7 @@ Init *TGVarScope::getVar(RecordKeeper &Records, MultiClass *ParsingMultiClass,
case SK_ForeachLoop: {
// The variable is a loop iterator?
if (CurLoop->IterVar) {
- VarInit *IterVar = dyn_cast<VarInit>(CurLoop->IterVar);
+ const VarInit *IterVar = dyn_cast<VarInit>(CurLoop->IterVar);
if (IterVar && IterVar->getNameInit() == Name)
return IterVar;
}
@@ -226,8 +228,8 @@ bool TGParser::AddValue(Record *CurRec, SMLoc Loc, const RecordVal &RV) {
/// SetValue -
/// Return true on error, false on success.
-bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
- ArrayRef<unsigned> BitList, Init *V,
+bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const Init *ValName,
+ ArrayRef<unsigned> BitList, const Init *V,
bool AllowSelfAssignment, bool OverrideDefLoc) {
if (!V) return false;
@@ -241,7 +243,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
// Do not allow assignments like 'X = X'. This will just cause infinite loops
// in the resolution machinery.
if (BitList.empty())
- if (VarInit *VI = dyn_cast<VarInit>(V))
+ if (const VarInit *VI = dyn_cast<VarInit>(V))
if (VI->getNameInit() == ValName && !AllowSelfAssignment)
return Error(Loc, "Recursion / self-assignment forbidden");
@@ -250,17 +252,17 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
// initializer.
//
if (!BitList.empty()) {
- BitsInit *CurVal = dyn_cast<BitsInit>(RV->getValue());
+ const BitsInit *CurVal = dyn_cast<BitsInit>(RV->getValue());
if (!CurVal)
return Error(Loc, "Value '" + ValName->getAsUnquotedString() +
"' is not a bits type");
// Convert the incoming value to a bits type of the appropriate size...
- Init *BI = V->getCastTo(BitsRecTy::get(Records, BitList.size()));
+ const Init *BI = V->getCastTo(BitsRecTy::get(Records, BitList.size()));
if (!BI)
return Error(Loc, "Initializer is not compatible with bit range");
- SmallVector<Init *, 16> NewBits(CurVal->getNumBits());
+ SmallVector<const Init *, 16> NewBits(CurVal->getNumBits());
// Loop over bits, assigning values as appropriate.
for (unsigned i = 0, e = BitList.size(); i != e; ++i) {
@@ -280,10 +282,10 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
if (OverrideDefLoc ? RV->setValue(V, Loc) : RV->setValue(V)) {
std::string InitType;
- if (BitsInit *BI = dyn_cast<BitsInit>(V))
+ if (const BitsInit *BI = dyn_cast<BitsInit>(V))
InitType = (Twine("' of type bit initializer with length ") +
Twine(BI->getNumBits())).str();
- else if (TypedInit *TI = dyn_cast<TypedInit>(V))
+ else if (const TypedInit *TI = dyn_cast<TypedInit>(V))
InitType = (Twine("' of type '") + TI->getType()->getAsString()).str();
return Error(Loc, "Field '" + ValName->getAsUnquotedString() +
"' of type '" + RV->getType()->getAsString() +
@@ -316,7 +318,7 @@ bool TGParser::AddSubClass(Record *CurRec, SubClassReference &SubClass) {
// Copy the subclass record's dumps to the new record.
CurRec->appendDumps(SC);
- Init *Name;
+ const Init *Name;
if (CurRec->isClass())
Name = VarInit::get(QualifiedNameOfImplicitName(*CurRec),
StringRecTy::get(Records));
@@ -427,7 +429,7 @@ bool TGParser::resolve(const ForeachLoop &Loop, SubstStack &Substs,
MapResolver R;
for (const auto &S : Substs)
R.set(S.first, S.second);
- Init *List = Loop.ListValue->resolveReferences(R);
+ const Init *List = Loop.ListValue->resolveReferences(R);
// For if-then-else blocks, we lower to a foreach loop whose list is a
// ternary selection between lists of different length. Since we don't
@@ -437,17 +439,17 @@ bool TGParser::resolve(const ForeachLoop &Loop, SubstStack &Substs,
// e.g. !if(!exists<SchedWrite>("__does_not_exist__"), [1], [])
if (auto *TI = dyn_cast<TernOpInit>(List);
TI && TI->getOpcode() == TernOpInit::IF && Final) {
- Init *OldLHS = TI->getLHS();
+ const Init *OldLHS = TI->getLHS();
R.setFinal(true);
- Init *LHS = OldLHS->resolveReferences(R);
+ const Init *LHS = OldLHS->resolveReferences(R);
if (LHS == OldLHS) {
PrintError(Loop.Loc,
Twine("unable to resolve if condition '") +
LHS->getAsString() + "' at end of containing scope");
return true;
}
- Init *MHS = TI->getMHS();
- Init *RHS = TI->getRHS();
+ const Init *MHS = TI->getMHS();
+ const Init *RHS = TI->getRHS();
List = TernOpInit::get(TernOpInit::IF, LHS, MHS, RHS, TI->getType())
->Fold(nullptr);
}
@@ -496,8 +498,8 @@ bool TGParser::resolve(const std::vector<RecordsEntry> &Source,
MapResolver R;
for (const auto &S : Substs)
R.set(S.first, S.second);
- Init *Condition = E.Assertion->Condition->resolveReferences(R);
- Init *Message = E.Assertion->Message->resolveReferences(R);
+ const Init *Condition = E.Assertion->Condition->resolveReferences(R);
+ const Init *Message = E.Assertion->Message->resolveReferences(R);
if (Dest)
Dest->push_back(std::make_unique<Record::AssertionInfo>(
@@ -509,7 +511,7 @@ bool TGParser::resolve(const std::vector<RecordsEntry> &Source,
MapResolver R;
for (const auto &S : Substs)
R.set(S.first, S.second);
- Init *Message = E.Dump->Message->resolveReferences(R);
+ const Init *Message = E.Dump->Message->resolveReferences(R);
if (Dest)
Dest->push_back(
@@ -540,7 +542,7 @@ bool TGParser::resolve(const std::vector<RecordsEntry> &Source,
/// Resolve the record fully and add it to the record keeper.
bool TGParser::addDefOne(std::unique_ptr<Record> Rec) {
- Init *NewName = nullptr;
+ const Init *NewName = nullptr;
if (const Record *Prev = Records.getDef(Rec->getNameInitAsString())) {
if (!Rec->isAnonymous()) {
PrintError(Rec->getLoc(),
@@ -586,17 +588,18 @@ bool TGParser::addDefOne(std::unique_ptr<Record> Rec) {
return false;
}
-bool TGParser::resolveArguments(Record *Rec, ArrayRef<ArgumentInit *> ArgValues,
+bool TGParser::resolveArguments(Record *Rec,
+ ArrayRef<const ArgumentInit *> ArgValues,
SMLoc Loc, ArgValueHandler ArgValueHandler) {
- ArrayRef<Init *> ArgNames = Rec->getTemplateArgs();
+ ArrayRef<const Init *> ArgNames = Rec->getTemplateArgs();
assert(ArgValues.size() <= ArgNames.size() &&
"Too many template arguments allowed");
// Loop over the template arguments and handle the (name, value) pair.
- SmallVector<Init *, 2> UnsolvedArgNames(ArgNames);
+ SmallVector<const Init *, 2> UnsolvedArgNames(ArgNames);
for (auto *Arg : ArgValues) {
- Init *ArgName = nullptr;
- Init *ArgValue = Arg->getValue();
+ const Init *ArgName = nullptr;
+ const Init *ArgValue = Arg->getValue();
if (Arg->isPositional())
ArgName = ArgNames[Arg->getIndex()];
if (Arg->isNamed())
@@ -613,7 +616,7 @@ bool TGParser::resolveArguments(Record *Rec, ArrayRef<ArgumentInit *> ArgValues,
// For unsolved arguments, if there is no default value, complain.
for (auto *UnsolvedArgName : UnsolvedArgNames) {
- Init *Default = Rec->getValue(UnsolvedArgName)->getValue();
+ const Init *Default = Rec->getValue(UnsolvedArgName)->getValue();
if (!Default->isComplete()) {
std::string Name = UnsolvedArgName->getAsUnquotedString();
Error(Loc, "value not specified for template argument '" + Name + "'");
@@ -630,22 +633,24 @@ bool TGParser::resolveArguments(Record *Rec, ArrayRef<ArgumentInit *> ArgValues,
/// Resolve the arguments of class and set them to MapResolver.
/// Returns true if failed.
bool TGParser::resolveArgumentsOfClass(MapResolver &R, Record *Rec,
- ArrayRef<ArgumentInit *> ArgValues,
+ ArrayRef<const ArgumentInit *> ArgValues,
SMLoc Loc) {
- return resolveArguments(Rec, ArgValues, Loc,
- [&](Init *Name, Init *Value) { R.set(Name, Value); });
+ return resolveArguments(
+ Rec, ArgValues, Loc,
+ [&](const Init *Name, const Init *Value) { R.set(Name, Value); });
}
/// Resolve the arguments of multiclass and store them into SubstStack.
/// Returns true if failed.
-bool TGParser::resolveArgumentsOfMultiClass(SubstStack &Substs, MultiClass *MC,
- ArrayRef<ArgumentInit *> ArgValues,
- Init *DefmName, SMLoc Loc) {
+bool TGParser::resolveArgumentsOfMultiClass(
+ SubstStack &Substs, MultiClass *MC,
+ ArrayRef<const ArgumentInit *> ArgValues, const Init *DefmName, SMLoc Loc) {
// Add an implicit argument NAME.
Substs.emplace_back(QualifiedNameOfImplicitName(MC), DefmName);
- return resolveArguments(
- &MC->Rec, ArgValues, Loc,
- [&](Init *Name, Init *Value) { Substs.emplace_back(Name, Value); });
+ return resolveArguments(&MC->Rec, ArgValues, Loc,
+ [&](const Init *Name, const Init *Value) {
+ Substs.emplace_back(Name, Value);
+ });
}
//===----------------------------------------------------------------------===//
@@ -666,7 +671,7 @@ bool TGParser::consume(tgtok::TokKind K) {
/// ObjectName ::= Value [ '#' Value ]*
/// ObjectName ::= /*empty*/
///
-Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
+const Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
switch (Lex.getCode()) {
case tgtok::colon:
case tgtok::semi:
@@ -683,12 +688,13 @@ Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
if (CurMultiClass)
CurRec = &CurMultiClass->Rec;
- Init *Name = ParseValue(CurRec, StringRecTy::get(Records), ParseNameMode);
+ const Init *Name =
+ ParseValue(CurRec, StringRecTy::get(Records), ParseNameMode);
if (!Name)
return nullptr;
if (CurMultiClass) {
- Init *NameStr = QualifiedNameOfImplicitName(CurMultiClass);
+ const Init *NameStr = QualifiedNameOfImplicitName(CurMultiClass);
HasReferenceResolver R(NameStr);
Name->resolveReferences(R);
if (!R.found())
@@ -827,14 +833,14 @@ ParseSubMultiClassReference(MultiClass *CurMC) {
///
/// SliceElement is either IntRecTy, ListRecTy, or nullptr
///
-TypedInit *TGParser::ParseSliceElement(Record *CurRec) {
+const TypedInit *TGParser::ParseSliceElement(Record *CurRec) {
auto LHSLoc = Lex.getLoc();
auto *CurVal = ParseValue(CurRec);
if (!CurVal)
return nullptr;
auto *LHS = cast<TypedInit>(CurVal);
- TypedInit *RHS = nullptr;
+ const TypedInit *RHS = nullptr;
switch (Lex.getCode()) {
case tgtok::dotdotdot:
case tgtok::minus: { // Deprecated
@@ -891,10 +897,10 @@ TypedInit *TGParser::ParseSliceElement(Record *CurRec) {
/// - Single=true
/// - SliceElements is Value<int> w/o trailing comma
///
-TypedInit *TGParser::ParseSliceElements(Record *CurRec, bool Single) {
- TypedInit *CurVal;
- SmallVector<Init *, 2> Elems; // int
- SmallVector<TypedInit *, 2> Slices; // list<int>
+const TypedInit *TGParser::ParseSliceElements(Record *CurRec, bool Single) {
+ const TypedInit *CurVal;
+ SmallVector<const Init *, 2> Elems; // int
+ SmallVector<const TypedInit *, 2> Slices; // list<int>
auto FlushElems = [&] {
if (!Elems.empty()) {
@@ -950,7 +956,7 @@ TypedInit *TGParser::ParseSliceElements(Record *CurRec, bool Single) {
FlushElems();
// Concatenate lists in Slices
- TypedInit *Result = nullptr;
+ const TypedInit *Result = nullptr;
for (auto *Slice : Slices) {
Result = (Result ? cast<TypedInit>(BinOpInit::getListConcat(Result, Slice))
: Slice);
@@ -966,12 +972,12 @@ TypedInit *TGParser::ParseSliceElements(Record *CurRec, bool Single) {
/// RangePiece ::= INTVAL INTVAL
// The last two forms are deprecated.
bool TGParser::ParseRangePiece(SmallVectorImpl<unsigned> &Ranges,
- TypedInit *FirstItem) {
- Init *CurVal = FirstItem;
+ const TypedInit *FirstItem) {
+ const Init *CurVal = FirstItem;
if (!CurVal)
CurVal = ParseValue(nullptr);
- IntInit *II = dyn_cast_or_null<IntInit>(CurVal);
+ const IntInit *II = dyn_cast_or_null<IntInit>(CurVal);
if (!II)
return TokError("expected integer or bitrange");
@@ -990,8 +996,8 @@ bool TGParser::ParseRangePiece(SmallVectorImpl<unsigned> &Ranges,
case tgtok::minus: {
Lex.Lex(); // eat
- Init *I_End = ParseValue(nullptr);
- IntInit *II_End = dyn_cast_or_null<IntInit>(I_End);
+ const Init *I_End = ParseValue(nullptr);
+ const IntInit *II_End = dyn_cast_or_null<IntInit>(I_End);
if (!II_End) {
TokError("expected integer value as end of range");
return true;
@@ -1149,16 +1155,16 @@ const RecTy *TGParser::ParseType() {
}
/// ParseIDValue
-Init *TGParser::ParseIDValue(Record *CurRec, StringInit *Name, SMRange NameLoc,
- IDParseMode Mode) {
- if (Init *I = CurScope->getVar(Records, CurMultiClass, Name, NameLoc,
- TrackReferenceLocs))
+const Init *TGParser::ParseIDValue(Record *CurRec, const StringInit *Name,
+ SMRange NameLoc, IDParseMode Mode) {
+ if (const Init *I = CurScope->getVar(Records, CurMultiClass, Name, NameLoc,
+ TrackReferenceLocs))
return I;
if (Mode == ParseNameMode)
return Name;
- if (Init *I = Records.getGlobal(Name->getValue())) {
+ if (const Init *I = Records.getGlobal(Name->getValue())) {
// Add a reference to the global if it's a record.
if (TrackReferenceLocs) {
if (auto *Def = dyn_cast<DefInit>(I))
@@ -1181,7 +1187,7 @@ Init *TGParser::ParseIDValue(Record *CurRec, StringInit *Name, SMRange NameLoc,
///
/// Operation ::= XOperator ['<' Type '>'] '(' Args ')'
///
-Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
+const Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
switch (Lex.getCode()) {
default:
TokError("unknown bang operator");
@@ -1291,14 +1297,14 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS = ParseValue(CurRec);
+ const Init *LHS = ParseValue(CurRec);
if (!LHS) return nullptr;
if (Code == UnOpInit::EMPTY || Code == UnOpInit::SIZE) {
- ListInit *LHSl = dyn_cast<ListInit>(LHS);
- StringInit *LHSs = dyn_cast<StringInit>(LHS);
- DagInit *LHSd = dyn_cast<DagInit>(LHS);
- TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+ const ListInit *LHSl = dyn_cast<ListInit>(LHS);
+ const StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ const DagInit *LHSd = dyn_cast<DagInit>(LHS);
+ const TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (!LHSl && !LHSs && !LHSd && !LHSt) {
TokError("expected string, list, or dag type argument in unary operator");
return nullptr;
@@ -1313,8 +1319,8 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
if (Code == UnOpInit::HEAD || Code == UnOpInit::TAIL ||
Code == UnOpInit::LISTFLATTEN) {
- ListInit *LHSl = dyn_cast<ListInit>(LHS);
- TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+ const ListInit *LHSl = dyn_cast<ListInit>(LHS);
+ const TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (!LHSl && !LHSt) {
TokError("expected list type argument in unary operator");
return nullptr;
@@ -1333,8 +1339,8 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
bool UseElementType =
Code == UnOpInit::HEAD || Code == UnOpInit::LISTFLATTEN;
if (LHSl) {
- Init *Item = LHSl->getElement(0);
- TypedInit *Itemt = dyn_cast<TypedInit>(Item);
+ const Init *Item = LHSl->getElement(0);
+ const TypedInit *Itemt = dyn_cast<TypedInit>(Item);
if (!Itemt) {
TokError("untyped list element in unary operator");
return nullptr;
@@ -1381,7 +1387,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS = ParseValue(CurRec);
+ const Init *LHS = ParseValue(CurRec);
if (!LHS)
return nullptr;
@@ -1390,7 +1396,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- return (IsAOpInit::get(Type, LHS))->Fold();
+ return IsAOpInit::get(Type, LHS)->Fold();
}
case tgtok::XExists: {
@@ -1407,11 +1413,11 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
}
SMLoc ExprLoc = Lex.getLoc();
- Init *Expr = ParseValue(CurRec);
+ const Init *Expr = ParseValue(CurRec);
if (!Expr)
return nullptr;
- TypedInit *ExprType = dyn_cast<TypedInit>(Expr);
+ const TypedInit *ExprType = dyn_cast<TypedInit>(Expr);
if (!ExprType) {
Error(ExprLoc, "expected string type argument in !exists operator");
return nullptr;
@@ -1580,7 +1586,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- SmallVector<Init*, 2> InitList;
+ SmallVector<const Init *, 2> InitList;
// Note that this loop consumes an arbitrary number of arguments.
// The actual count is checked later.
@@ -1589,7 +1595,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
InitList.push_back(ParseValue(CurRec, ArgType));
if (!InitList.back()) return nullptr;
- TypedInit *InitListBack = dyn_cast<TypedInit>(InitList.back());
+ const TypedInit *InitListBack = dyn_cast<TypedInit>(InitList.back());
if (!InitListBack) {
Error(OpLoc, Twine("expected value to be a typed value, got '" +
InitList.back()->getAsString() + "'"));
@@ -1759,7 +1765,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
Code == BinOpInit::AND || Code == BinOpInit::OR ||
Code == BinOpInit::XOR || Code == BinOpInit::MUL) {
while (InitList.size() > 2) {
- Init *RHS = InitList.pop_back_val();
+ const Init *RHS = InitList.pop_back_val();
RHS = (BinOpInit::get(Code, InitList.back(), RHS, Type))->Fold(CurRec);
InitList.back() = RHS;
}
@@ -1787,7 +1793,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- SmallVector<Init *, 2> Args;
+ SmallVector<const Init *, 2> Args;
bool FirstArgIsList = false;
for (;;) {
if (Args.size() >= 3) {
@@ -1800,7 +1806,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
if (!Args.back())
return nullptr;
- TypedInit *ArgBack = dyn_cast<TypedInit>(Args.back());
+ const TypedInit *ArgBack = dyn_cast<TypedInit>(Args.back());
if (!ArgBack) {
Error(OpLoc, Twine("expected value to be a typed value, got '" +
Args.back()->getAsString() + "'"));
@@ -1838,7 +1844,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS, *MHS, *RHS;
+ const Init *LHS, *MHS, *RHS;
auto ArgCount = Args.size();
assert(ArgCount >= 1);
auto *Arg0 = cast<TypedInit>(Args[0]);
@@ -1916,7 +1922,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS = ParseValue(CurRec);
+ const Init *LHS = ParseValue(CurRec);
if (!LHS) return nullptr;
if (!consume(tgtok::comma)) {
@@ -1925,7 +1931,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
}
SMLoc MHSLoc = Lex.getLoc();
- Init *MHS = ParseValue(CurRec, ItemType);
+ const Init *MHS = ParseValue(CurRec, ItemType);
if (!MHS)
return nullptr;
@@ -1935,7 +1941,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
}
SMLoc RHSLoc = Lex.getLoc();
- Init *RHS = ParseValue(CurRec, ItemType);
+ const Init *RHS = ParseValue(CurRec, ItemType);
if (!RHS)
return nullptr;
@@ -1947,7 +1953,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
switch (LexCode) {
default: llvm_unreachable("Unhandled code!");
case tgtok::XDag: {
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt && !isa<UnsetInit>(MHS)) {
Error(MHSLoc, "could not determine type of the child list in !dag");
return nullptr;
@@ -1958,7 +1964,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (!RHSt && !isa<UnsetInit>(RHS)) {
Error(RHSLoc, "could not determine type of the name list in !dag");
return nullptr;
@@ -1980,16 +1986,16 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
const RecTy *MHSTy = nullptr;
const RecTy *RHSTy = nullptr;
- if (TypedInit *MHSt = dyn_cast<TypedInit>(MHS))
+ if (const TypedInit *MHSt = dyn_cast<TypedInit>(MHS))
MHSTy = MHSt->getType();
- if (BitsInit *MHSbits = dyn_cast<BitsInit>(MHS))
+ if (const BitsInit *MHSbits = dyn_cast<BitsInit>(MHS))
MHSTy = BitsRecTy::get(Records, MHSbits->getNumBits());
if (isa<BitInit>(MHS))
MHSTy = BitRecTy::get(Records);
- if (TypedInit *RHSt = dyn_cast<TypedInit>(RHS))
+ if (const TypedInit *RHSt = dyn_cast<TypedInit>(RHS))
RHSTy = RHSt->getType();
- if (BitsInit *RHSbits = dyn_cast<BitsInit>(RHS))
+ if (const BitsInit *RHSbits = dyn_cast<BitsInit>(RHS))
RHSTy = BitsRecTy::get(Records, RHSbits->getNumBits());
if (isa<BitInit>(RHS))
RHSTy = BitRecTy::get(Records);
@@ -2014,7 +2020,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
break;
}
case tgtok::XSubst: {
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (!RHSt) {
TokError("could not get type for !subst");
return nullptr;
@@ -2023,7 +2029,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
break;
}
case tgtok::XSetDagArg: {
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt || !isa<IntRecTy, StringRecTy>(MHSt->getType())) {
Error(MHSLoc, Twine("expected integer index or string name, got ") +
(MHSt ? ("type '" + MHSt->getType()->getAsString())
@@ -2034,7 +2040,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
break;
}
case tgtok::XSetDagName: {
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt || !isa<IntRecTy, StringRecTy>(MHSt->getType())) {
Error(MHSLoc, Twine("expected integer index or string name, got ") +
(MHSt ? ("type '" + MHSt->getType()->getAsString())
@@ -2042,7 +2048,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
"'");
return nullptr;
}
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
// The name could be a string or unset.
if (RHSt && !isa<StringRecTy>(RHSt->getType())) {
Error(RHSLoc, Twine("expected string or unset name, got type '") +
@@ -2072,11 +2078,11 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *StartUntyped = ParseValue(CurRec);
+ const Init *StartUntyped = ParseValue(CurRec);
if (!StartUntyped)
return nullptr;
- TypedInit *Start = dyn_cast<TypedInit>(StartUntyped);
+ const TypedInit *Start = dyn_cast<TypedInit>(StartUntyped);
if (!Start) {
TokError(Twine("could not get type of !foldl start: '") +
StartUntyped->getAsString() + "'");
@@ -2088,11 +2094,11 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *ListUntyped = ParseValue(CurRec);
+ const Init *ListUntyped = ParseValue(CurRec);
if (!ListUntyped)
return nullptr;
- TypedInit *List = dyn_cast<TypedInit>(ListUntyped);
+ const TypedInit *List = dyn_cast<TypedInit>(ListUntyped);
if (!List) {
TokError(Twine("could not get type of !foldl list: '") +
ListUntyped->getAsString() + "'");
@@ -2116,7 +2122,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *A = StringInit::get(Records, Lex.getCurStrVal());
+ const Init *A = StringInit::get(Records, Lex.getCurStrVal());
if (CurRec && CurRec->getValue(A)) {
TokError((Twine("left !foldl variable '") + A->getAsString() +
"' already defined")
@@ -2134,7 +2140,7 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *B = StringInit::get(Records, Lex.getCurStrVal());
+ const Init *B = StringInit::get(Records, Lex.getCurStrVal());
if (CurRec && CurRec->getValue(B)) {
TokError((Twine("right !foldl variable '") + B->getAsString() +
"' already defined")
@@ -2161,14 +2167,14 @@ Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
ParseRec->addValue(RecordVal(A, Start->getType(), RecordVal::FK_Normal));
ParseRec->addValue(
RecordVal(B, ListType->getElementType(), RecordVal::FK_Normal));
- Init *ExprUntyped = ParseValue(ParseRec);
+ const Init *ExprUntyped = ParseValue(ParseRec);
ParseRec->removeValue(A);
ParseRec->removeValue(B);
PopScope(FoldScope);
if (!ExprUntyped)
return nullptr;
- TypedInit *Expr = dyn_cast<TypedInit>(ExprUntyped);
+ const TypedInit *Expr = dyn_cast<TypedInit>(ExprUntyped);
if (!Expr) {
TokError("could not get type of !foldl expression");
return nullptr;
@@ -2226,7 +2232,8 @@ const RecTy *TGParser::ParseOperatorType() {
/// Parse the !substr operation. Return null on error.
///
/// Substr ::= !substr(string, start-int [, length-int]) => string
-Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
+const Init *TGParser::ParseOperationSubstr(Record *CurRec,
+ const RecTy *ItemType) {
TernOpInit::TernaryOp Code = TernOpInit::SUBSTR;
const RecTy *Type = StringRecTy::get(Records);
@@ -2237,7 +2244,7 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS = ParseValue(CurRec);
+ const Init *LHS = ParseValue(CurRec);
if (!LHS)
return nullptr;
@@ -2247,12 +2254,12 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
}
SMLoc MHSLoc = Lex.getLoc();
- Init *MHS = ParseValue(CurRec);
+ const Init *MHS = ParseValue(CurRec);
if (!MHS)
return nullptr;
SMLoc RHSLoc = Lex.getLoc();
- Init *RHS;
+ const Init *RHS;
if (consume(tgtok::comma)) {
RHSLoc = Lex.getLoc();
RHS = ParseValue(CurRec);
@@ -2273,7 +2280,7 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
Type->getAsString() + "'");
}
- TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+ const TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (!LHSt && !isa<UnsetInit>(LHS)) {
TokError("could not determine type of the string in !substr");
return nullptr;
@@ -2284,7 +2291,7 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt && !isa<UnsetInit>(MHS)) {
TokError("could not determine type of the start position in !substr");
return nullptr;
@@ -2296,7 +2303,7 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
}
if (RHS) {
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (!RHSt && !isa<UnsetInit>(RHS)) {
TokError("could not determine type of the length in !substr");
return nullptr;
@@ -2314,7 +2321,8 @@ Init *TGParser::ParseOperationSubstr(Record *CurRec, const RecTy *ItemType) {
/// Parse the !find operation. Return null on error.
///
/// Substr ::= !find(string, string [, start-int]) => int
-Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
+const Init *TGParser::ParseOperationFind(Record *CurRec,
+ const RecTy *ItemType) {
TernOpInit::TernaryOp Code = TernOpInit::FIND;
const RecTy *Type = IntRecTy::get(Records);
@@ -2325,7 +2333,7 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- Init *LHS = ParseValue(CurRec);
+ const Init *LHS = ParseValue(CurRec);
if (!LHS)
return nullptr;
@@ -2335,12 +2343,12 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
}
SMLoc MHSLoc = Lex.getLoc();
- Init *MHS = ParseValue(CurRec);
+ const Init *MHS = ParseValue(CurRec);
if (!MHS)
return nullptr;
SMLoc RHSLoc = Lex.getLoc();
- Init *RHS;
+ const Init *RHS;
if (consume(tgtok::comma)) {
RHSLoc = Lex.getLoc();
RHS = ParseValue(CurRec);
@@ -2361,7 +2369,7 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
Type->getAsString() + "'");
}
- TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+ const TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (!LHSt && !isa<UnsetInit>(LHS)) {
TokError("could not determine type of the source string in !find");
return nullptr;
@@ -2372,7 +2380,7 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
return nullptr;
}
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt && !isa<UnsetInit>(MHS)) {
TokError("could not determine type of the target string in !find");
return nullptr;
@@ -2384,7 +2392,7 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
}
if (RHS) {
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (!RHSt && !isa<UnsetInit>(RHS)) {
TokError("could not determine type of the start position in !find");
return nullptr;
@@ -2403,8 +2411,8 @@ Init *TGParser::ParseOperationFind(Record *CurRec, const RecTy *ItemType) {
///
/// ForEach ::= !foreach(ID, list-or-dag, expr) => list<expr type>
/// Filter ::= !foreach(ID, list, predicate) ==> list<list type>
-Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
- const RecTy *ItemType) {
+const Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
+ const RecTy *ItemType) {
SMLoc OpLoc = Lex.getLoc();
tgtok::TokKind Operation = Lex.getCode();
Lex.Lex(); // eat the operation
@@ -2418,7 +2426,7 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
return nullptr;
}
- Init *LHS = StringInit::get(Records, Lex.getCurStrVal());
+ const Init *LHS = StringInit::get(Records, Lex.getCurStrVal());
Lex.Lex(); // eat the ID.
if (CurRec && CurRec->getValue(LHS)) {
@@ -2433,7 +2441,7 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
return nullptr;
}
- Init *MHS = ParseValue(CurRec);
+ const Init *MHS = ParseValue(CurRec);
if (!MHS)
return nullptr;
@@ -2442,7 +2450,7 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
return nullptr;
}
- TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+ const TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (!MHSt) {
TokError("could not get type of !foreach/!filter list or dag");
return nullptr;
@@ -2499,7 +2507,7 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
}
TGVarScope *TempScope = PushScope(ParseRec);
ParseRec->addValue(RecordVal(LHS, InEltType, RecordVal::FK_Normal));
- Init *RHS = ParseValue(ParseRec, ExprEltType);
+ const Init *RHS = ParseValue(ParseRec, ExprEltType);
ParseRec->removeValue(LHS);
PopScope(TempScope);
if (!RHS)
@@ -2512,7 +2520,7 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
const RecTy *OutType = InEltType;
if (Operation == tgtok::XForEach && !IsDAG) {
- TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+ const TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (!RHSt) {
TokError("could not get type of !foreach result expression");
return nullptr;
@@ -2528,7 +2536,8 @@ Init *TGParser::ParseOperationForEachFilter(Record *CurRec,
->Fold(CurRec);
}
-Init *TGParser::ParseOperationCond(Record *CurRec, const RecTy *ItemType) {
+const Init *TGParser::ParseOperationCond(Record *CurRec,
+ const RecTy *ItemType) {
Lex.Lex(); // eat the operation 'cond'
if (!consume(tgtok::l_paren)) {
@@ -2537,13 +2546,13 @@ Init *TGParser::ParseOperationCond(Record *CurRec, const RecTy *ItemType) {
}
// Parse through '[Case: Val,]+'
- SmallVector<Init *, 4> Case;
- SmallVector<Init *, 4> Val;
+ SmallVector<const Init *, 4> Case;
+ SmallVector<const Init *, 4> Val;
while (true) {
if (consume(tgtok::r_paren))
break;
- Init *V = ParseValue(CurRec);
+ const Init *V = ParseValue(CurRec);
if (!V)
return nullptr;
Case.push_back(V);
@@ -2574,11 +2583,11 @@ Init *TGParser::ParseOperationCond(Record *CurRec, const RecTy *ItemType) {
// resolve type
const RecTy *Type = nullptr;
- for (Init *V : Val) {
+ for (const Init *V : Val) {
const RecTy *VTy = nullptr;
- if (TypedInit *Vt = dyn_cast<TypedInit>(V))
+ if (const TypedInit *Vt = dyn_cast<TypedInit>(V))
VTy = Vt->getType();
- if (BitsInit *Vbits = dyn_cast<BitsInit>(V))
+ if (const BitsInit *Vbits = dyn_cast<BitsInit>(V))
VTy = BitsRecTy::get(Records, Vbits->getNumBits());
if (isa<BitInit>(V))
VTy = BitRecTy::get(Records);
@@ -2633,9 +2642,9 @@ Init *TGParser::ParseOperationCond(Record *CurRec, const RecTy *ItemType) {
/// SimpleValue ::= STRCONCATTOK '(' Value ',' Value ')'
/// SimpleValue ::= COND '(' [Value ':' Value,]+ ')'
///
-Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
- IDParseMode Mode) {
- Init *R = nullptr;
+const Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
+ IDParseMode Mode) {
+ const Init *R = nullptr;
tgtok::TokKind Code = Lex.getCode();
// Parse bang operators.
@@ -2689,7 +2698,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
break;
case tgtok::Id: {
SMRange NameLoc = Lex.getLocRange();
- StringInit *Name = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *Name = StringInit::get(Records, Lex.getCurStrVal());
tgtok::TokKind Next = Lex.Lex();
if (Next == tgtok::equal) // Named argument.
return Name;
@@ -2706,7 +2715,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
return nullptr;
}
- SmallVector<ArgumentInit *, 8> Args;
+ SmallVector<const ArgumentInit *, 8> Args;
Lex.Lex(); // consume the <
if (ParseTemplateArgValueList(Args, CurRec, Class))
return nullptr; // Error parsing value list.
@@ -2724,7 +2733,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
case tgtok::l_brace: { // Value ::= '{' ValueList '}'
SMLoc BraceLoc = Lex.getLoc();
Lex.Lex(); // eat the '{'
- SmallVector<Init*, 16> Vals;
+ SmallVector<const Init *, 16> Vals;
if (Lex.getCode() != tgtok::r_brace) {
ParseValueList(Vals, CurRec);
@@ -2735,7 +2744,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
return nullptr;
}
- SmallVector<Init *, 16> NewBits;
+ SmallVector<const Init *, 16> NewBits;
// As we parse { a, b, ... }, 'a' is the highest bit, but we parse it
// first. We'll first read everything in to a vector, then we can reverse
@@ -2745,13 +2754,13 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
// if the API was a little more orthogonal.
// bits<n> values are allowed to initialize n bits.
- if (BitsInit *BI = dyn_cast<BitsInit>(Vals[i])) {
+ if (const BitsInit *BI = dyn_cast<BitsInit>(Vals[i])) {
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i)
NewBits.push_back(BI->getBit((e - i) - 1));
continue;
}
// bits<n> can also come from variable initializers.
- if (VarInit *VI = dyn_cast<VarInit>(Vals[i])) {
+ if (const VarInit *VI = dyn_cast<VarInit>(Vals[i])) {
if (const BitsRecTy *BitsRec = dyn_cast<BitsRecTy>(VI->getType())) {
for (unsigned i = 0, e = BitsRec->getNumBits(); i != e; ++i)
NewBits.push_back(VI->getBit((e - i) - 1));
@@ -2760,7 +2769,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
// Fallthrough to try convert this to a bit.
}
// All other values must be convertible to just a single bit.
- Init *Bit = Vals[i]->getCastTo(BitRecTy::get(Records));
+ const Init *Bit = Vals[i]->getCastTo(BitRecTy::get(Records));
if (!Bit) {
Error(BraceLoc, "Element #" + Twine(i) + " (" + Vals[i]->getAsString() +
") is not convertable to a bit");
@@ -2773,7 +2782,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
}
case tgtok::l_square: { // Value ::= '[' ValueList ']'
Lex.Lex(); // eat the '['
- SmallVector<Init*, 16> Vals;
+ SmallVector<const Init *, 16> Vals;
const RecTy *DeducedEltTy = nullptr;
const ListRecTy *GivenListTy = nullptr;
@@ -2815,8 +2824,8 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
// Check elements
const RecTy *EltTy = nullptr;
- for (Init *V : Vals) {
- TypedInit *TArg = dyn_cast<TypedInit>(V);
+ for (const Init *V : Vals) {
+ const TypedInit *TArg = dyn_cast<TypedInit>(V);
if (TArg) {
if (EltTy) {
EltTy = resolveTypes(EltTy, TArg->getType());
@@ -2872,11 +2881,11 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
return nullptr;
}
- Init *Operator = ParseValue(CurRec);
+ const Init *Operator = ParseValue(CurRec);
if (!Operator) return nullptr;
// If the operator name is present, parse it.
- StringInit *OperatorName = nullptr;
+ const StringInit *OperatorName = nullptr;
if (consume(tgtok::colon)) {
if (Lex.getCode() != tgtok::VarName) { // eat the ':'
TokError("expected variable name in dag operator");
@@ -2886,7 +2895,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
Lex.Lex(); // eat the VarName.
}
- SmallVector<std::pair<llvm::Init*, StringInit*>, 8> DagArgs;
+ SmallVector<std::pair<const Init *, const StringInit *>, 8> DagArgs;
if (Lex.getCode() != tgtok::r_paren) {
ParseDagArgList(DagArgs, CurRec);
if (DagArgs.empty()) return nullptr;
@@ -2911,10 +2920,10 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
/// ValueSuffix ::= '[' SliceElements ']'
/// ValueSuffix ::= '.' ID
///
-Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
- IDParseMode Mode) {
+const Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
+ IDParseMode Mode) {
SMLoc LHSLoc = Lex.getLoc();
- Init *Result = ParseSimpleValue(CurRec, ItemType, Mode);
+ const Init *Result = ParseSimpleValue(CurRec, ItemType, Mode);
if (!Result) return nullptr;
// Parse the suffixes now if present.
@@ -2962,7 +2971,7 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
}
Lex.Lex(); // eat the '['
- TypedInit *RHS = ParseSliceElements(CurRec, /*Single=*/true);
+ const TypedInit *RHS = ParseSliceElements(CurRec, /*Single=*/true);
if (!RHS)
return nullptr;
@@ -2990,7 +2999,8 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
return nullptr;
}
SMRange FieldNameLoc = Lex.getLocRange();
- StringInit *FieldName = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *FieldName =
+ StringInit::get(Records, Lex.getCurStrVal());
if (!Result->getFieldType(FieldName)) {
TokError("Cannot access field '" + Lex.getCurStrVal() + "' of value '" +
Result->getAsString() + "'");
@@ -3018,7 +3028,7 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
case tgtok::paste:
SMLoc PasteLoc = Lex.getLoc();
- TypedInit *LHS = dyn_cast<TypedInit>(Result);
+ const TypedInit *LHS = dyn_cast<TypedInit>(Result);
if (!LHS) {
Error(PasteLoc, "LHS of paste is not typed!");
return nullptr;
@@ -3037,7 +3047,7 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
Result = LHS; // trailing paste, ignore.
break;
default:
- Init *RHSResult = ParseValue(CurRec, ItemType, ParseValueMode);
+ const Init *RHSResult = ParseValue(CurRec, ItemType, ParseValueMode);
if (!RHSResult)
return nullptr;
Result = BinOpInit::getListConcat(LHS, RHSResult);
@@ -3060,7 +3070,7 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
LHS = CastLHS;
}
- TypedInit *RHS = nullptr;
+ const TypedInit *RHS = nullptr;
Lex.Lex(); // Eat the '#'.
switch (Lex.getCode()) {
@@ -3076,7 +3086,7 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
break;
default:
- Init *RHSResult = ParseValue(CurRec, nullptr, ParseNameMode);
+ const Init *RHSResult = ParseValue(CurRec, nullptr, ParseNameMode);
if (!RHSResult)
return nullptr;
RHS = dyn_cast<TypedInit>(RHSResult);
@@ -3113,26 +3123,26 @@ Init *TGParser::ParseValue(Record *CurRec, const RecTy *ItemType,
/// DagArgList ::= DagArg
/// DagArgList ::= DagArgList ',' DagArg
void TGParser::ParseDagArgList(
- SmallVectorImpl<std::pair<llvm::Init*, StringInit*>> &Result,
+ SmallVectorImpl<std::pair<const Init *, const StringInit *>> &Result,
Record *CurRec) {
while (true) {
// DagArg ::= VARNAME
if (Lex.getCode() == tgtok::VarName) {
// A missing value is treated like '?'.
- StringInit *VarName = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *VarName = StringInit::get(Records, Lex.getCurStrVal());
Result.emplace_back(UnsetInit::get(Records), VarName);
Lex.Lex();
} else {
// DagArg ::= Value (':' VARNAME)?
- Init *Val = ParseValue(CurRec);
+ const Init *Val = ParseValue(CurRec);
if (!Val) {
Result.clear();
return;
}
// If the variable name is present, add it.
- StringInit *VarName = nullptr;
+ const StringInit *VarName = nullptr;
if (Lex.getCode() == tgtok::colon) {
if (Lex.Lex() != tgtok::VarName) { // eat the ':'
TokError("expected variable name in dag literal");
@@ -3156,8 +3166,8 @@ void TGParser::ParseDagArgList(
///
/// ValueList ::= Value (',' Value)
///
-void TGParser::ParseValueList(SmallVectorImpl<Init *> &Result, Record *CurRec,
- const RecTy *ItemType) {
+void TGParser::ParseValueList(SmallVectorImpl<const Init *> &Result,
+ Record *CurRec, const RecTy *ItemType) {
Result.push_back(ParseValue(CurRec, ItemType));
if (!Result.back()) {
Result.clear();
@@ -3185,9 +3195,10 @@ void TGParser::ParseValueList(SmallVectorImpl<Init *> &Result, Record *CurRec,
// PostionalArgValueList ::= [Value {',' Value}*]
// NamedArgValueList ::= [NameValue '=' Value {',' NameValue '=' Value}*]
bool TGParser::ParseTemplateArgValueList(
- SmallVectorImpl<ArgumentInit *> &Result, Record *CurRec, Record *ArgsRec) {
+ SmallVectorImpl<const ArgumentInit *> &Result, Record *CurRec,
+ Record *ArgsRec) {
assert(Result.empty() && "Result vector is not empty");
- ArrayRef<Init *> TArgs = ArgsRec->getTemplateArgs();
+ ArrayRef<const Init *> TArgs = ArgsRec->getTemplateArgs();
if (consume(tgtok::greater)) // empty value list
return false;
@@ -3203,7 +3214,7 @@ bool TGParser::ParseTemplateArgValueList(
SMLoc ValueLoc = Lex.getLoc();
// If we are parsing named argument, we don't need to know the argument name
// and argument type will be resolved after we know the name.
- Init *Value = ParseValue(
+ const Init *Value = ParseValue(
CurRec,
HasNamedArg ? nullptr : ArgsRec->getValue(TArgs[ArgIndex])->getType());
if (!Value)
@@ -3216,7 +3227,7 @@ bool TGParser::ParseTemplateArgValueList(
"The name of named argument should be a valid identifier");
auto *Name = cast<StringInit>(Value);
- Init *QualifiedName = QualifyName(*ArgsRec, Name);
+ const Init *QualifiedName = QualifyName(*ArgsRec, Name);
auto *NamedArg = ArgsRec->getValue(QualifiedName);
if (!NamedArg)
return Error(ValueLoc,
@@ -3261,7 +3272,7 @@ bool TGParser::ParseTemplateArgValueList(
///
/// Declaration ::= FIELD? Type ID ('=' Value)?
///
-Init *TGParser::ParseDeclaration(Record *CurRec,
+const Init *TGParser::ParseDeclaration(Record *CurRec,
bool ParsingTemplateArgs) {
// Read the field prefix if present.
bool HasField = consume(tgtok::Field);
@@ -3286,7 +3297,7 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
}
SMLoc IdLoc = Lex.getLoc();
- Init *DeclName = StringInit::get(Records, Str);
+ const Init *DeclName = StringInit::get(Records, Str);
Lex.Lex();
bool BadField;
@@ -3313,7 +3324,7 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
// If a value is present, parse it and set new field's value.
if (consume(tgtok::equal)) {
SMLoc ValLoc = Lex.getLoc();
- Init *Val = ParseValue(CurRec, Type);
+ const Init *Val = ParseValue(CurRec, Type);
if (!Val ||
SetValue(CurRec, ValLoc, DeclName, {}, Val,
/*AllowSelfAssignment=*/false, /*OverrideDefLoc=*/false)) {
@@ -3335,13 +3346,14 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
/// ForeachDeclaration ::= ID '=' RangePiece
/// ForeachDeclaration ::= ID '=' Value
///
-VarInit *TGParser::ParseForeachDeclaration(Init *&ForeachListValue) {
+const VarInit *
+TGParser::ParseForeachDeclaration(const Init *&ForeachListValue) {
if (Lex.getCode() != tgtok::Id) {
TokError("Expected identifier in foreach declaration");
return nullptr;
}
- Init *DeclName = StringInit::get(Records, Lex.getCurStrVal());
+ const Init *DeclName = StringInit::get(Records, Lex.getCurStrVal());
Lex.Lex();
// If a value is present, parse it.
@@ -3366,11 +3378,11 @@ VarInit *TGParser::ParseForeachDeclaration(Init *&ForeachListValue) {
default: {
SMLoc ValueLoc = Lex.getLoc();
- Init *I = ParseValue(nullptr);
+ const Init *I = ParseValue(nullptr);
if (!I)
return nullptr;
- TypedInit *TI = dyn_cast<TypedInit>(I);
+ const TypedInit *TI = dyn_cast<TypedInit>(I);
if (TI && isa<ListRecTy>(TI->getType())) {
ForeachListValue = I;
IterType = cast<ListRecTy>(TI->getType())->getElementType();
@@ -3422,7 +3434,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
Record *TheRecToAddTo = CurRec ? CurRec : &CurMultiClass->Rec;
// Read the first declaration.
- Init *TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
+ const Init *TemplArg = ParseDeclaration(CurRec, true /*templateargs*/);
if (!TemplArg)
return true;
@@ -3479,7 +3491,7 @@ bool TGParser::ParseBodyItem(Record *CurRec) {
return TokError("expected field identifier after let");
SMLoc IdLoc = Lex.getLoc();
- StringInit *FieldName = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *FieldName = StringInit::get(Records, Lex.getCurStrVal());
Lex.Lex(); // eat the field name.
SmallVector<unsigned, 16> BitList;
@@ -3501,7 +3513,7 @@ bool TGParser::ParseBodyItem(Record *CurRec) {
Type = BitsRecTy::get(Records, BitList.size());
}
- Init *Val = ParseValue(CurRec, Type);
+ const Init *Val = ParseValue(CurRec, Type);
if (!Val) return true;
if (!consume(tgtok::semi))
@@ -3629,7 +3641,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
// Parse ObjectName and make a record for it.
std::unique_ptr<Record> CurRec;
- Init *Name = ParseObjectName(CurMultiClass);
+ const Init *Name = ParseObjectName(CurMultiClass);
if (!Name)
return true;
@@ -3665,7 +3677,7 @@ bool TGParser::ParseDefset() {
if (Lex.getCode() != tgtok::Id)
return TokError("expected identifier");
- StringInit *DeclName = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *DeclName = StringInit::get(Records, Lex.getCurStrVal());
if (Records.getGlobal(DeclName->getValue()))
return TokError("def or global variable of this name already exists");
@@ -3738,7 +3750,7 @@ bool TGParser::ParseDefvar(Record *CurRec) {
if (Lex.getCode() != tgtok::Id)
return TokError("expected identifier");
- StringInit *DeclName = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *DeclName = StringInit::get(Records, Lex.getCurStrVal());
if (CurScope->varAlreadyDefined(DeclName->getValue()))
return TokError("local variable of this name already exists");
@@ -3758,7 +3770,7 @@ bool TGParser::ParseDefvar(Record *CurRec) {
if (!consume(tgtok::equal))
return TokError("expected '='");
- Init *Value = ParseValue(CurRec);
+ const Init *Value = ParseValue(CurRec);
if (!Value)
return true;
@@ -3786,8 +3798,8 @@ bool TGParser::ParseForeach(MultiClass *CurMultiClass) {
// Make a temporary object to record items associated with the for
// loop.
- Init *ListValue = nullptr;
- VarInit *IterName = ParseForeachDeclaration(ListValue);
+ const Init *ListValue = nullptr;
+ const VarInit *IterName = ParseForeachDeclaration(ListValue);
if (!IterName)
return TokError("expected declaration in for");
@@ -3840,7 +3852,7 @@ bool TGParser::ParseIf(MultiClass *CurMultiClass) {
// Make a temporary object to record items associated with the for
// loop.
- Init *Condition = ParseValue(nullptr);
+ const Init *Condition = ParseValue(nullptr);
if (!Condition)
return true;
@@ -3853,14 +3865,14 @@ bool TGParser::ParseIf(MultiClass *CurMultiClass) {
// loop, over a list of length 0 or 1 depending on the condition, and with no
// iteration variable being assigned.
- ListInit *EmptyList = ListInit::get({}, BitRecTy::get(Records));
- ListInit *SingletonList =
+ const ListInit *EmptyList = ListInit::get({}, BitRecTy::get(Records));
+ const ListInit *SingletonList =
ListInit::get({BitInit::get(Records, true)}, BitRecTy::get(Records));
const RecTy *BitListTy = ListRecTy::get(BitRecTy::get(Records));
// The foreach containing the then-clause selects SingletonList if
// the condition is true.
- Init *ThenClauseList =
+ const Init *ThenClauseList =
TernOpInit::get(TernOpInit::IF, Condition, SingletonList, EmptyList,
BitListTy)
->Fold(nullptr);
@@ -3882,7 +3894,7 @@ bool TGParser::ParseIf(MultiClass *CurMultiClass) {
if (consume(tgtok::ElseKW)) {
// The foreach containing the else-clause uses the same pair of lists as
// above, but this time, selects SingletonList if the condition is *false*.
- Init *ElseClauseList =
+ const Init *ElseClauseList =
TernOpInit::get(TernOpInit::IF, Condition, EmptyList, SingletonList,
BitListTy)
->Fold(nullptr);
@@ -3942,7 +3954,7 @@ bool TGParser::ParseAssert(MultiClass *CurMultiClass, Record *CurRec) {
Lex.Lex(); // Eat the 'assert' token.
SMLoc ConditionLoc = Lex.getLoc();
- Init *Condition = ParseValue(CurRec);
+ const Init *Condition = ParseValue(CurRec);
if (!Condition)
return true;
@@ -3951,7 +3963,7 @@ bool TGParser::ParseAssert(MultiClass *CurMultiClass, Record *CurRec) {
return true;
}
- Init *Message = ParseValue(CurRec);
+ const Init *Message = ParseValue(CurRec);
if (!Message)
return true;
@@ -4032,7 +4044,7 @@ void TGParser::ParseLetList(SmallVectorImpl<LetRecord> &Result) {
return;
}
- StringInit *Name = StringInit::get(Records, Lex.getCurStrVal());
+ const StringInit *Name = StringInit::get(Records, Lex.getCurStrVal());
SMLoc NameLoc = Lex.getLoc();
Lex.Lex(); // Eat the identifier.
@@ -4050,7 +4062,7 @@ void TGParser::ParseLetList(SmallVectorImpl<LetRecord> &Result) {
return;
}
- Init *Val = ParseValue(nullptr);
+ const Init *Val = ParseValue(nullptr);
if (!Val) {
Result.clear();
return;
@@ -4226,7 +4238,7 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
Lex.Lex(); // eat the defm
- Init *DefmName = ParseObjectName(CurMultiClass);
+ const Init *DefmName = ParseObjectName(CurMultiClass);
if (!DefmName)
return true;
if (isa<UnsetInit>(DefmName)) {
@@ -4399,11 +4411,11 @@ bool TGParser::ParseFile() {
// If necessary, replace an argument with a cast to the required type.
// The argument count has already been checked.
bool TGParser::CheckTemplateArgValues(
- SmallVectorImpl<llvm::ArgumentInit *> &Values, SMLoc Loc, Record *ArgsRec) {
- ArrayRef<Init *> TArgs = ArgsRec->getTemplateArgs();
+ SmallVectorImpl<const ArgumentInit *> &Values, SMLoc Loc, Record *ArgsRec) {
+ ArrayRef<const Init *> TArgs = ArgsRec->getTemplateArgs();
- for (llvm::ArgumentInit *&Value : Values) {
- Init *ArgName = nullptr;
+ for (const ArgumentInit *&Value : Values) {
+ const Init *ArgName = nullptr;
if (Value->isPositional())
ArgName = TArgs[Value->getIndex()];
if (Value->isNamed())
@@ -4412,7 +4424,7 @@ bool TGParser::CheckTemplateArgValues(
RecordVal *Arg = ArgsRec->getValue(ArgName);
const RecTy *ArgType = Arg->getType();
- if (TypedInit *ArgValue = dyn_cast<TypedInit>(Value->getValue())) {
+ if (const TypedInit *ArgValue = dyn_cast<TypedInit>(Value->getValue())) {
auto *CastValue = ArgValue->getCastTo(ArgType);
if (CastValue) {
assert((!isa<TypedInit>(CastValue) ||
@@ -4466,7 +4478,7 @@ bool TGParser::ParseDump(MultiClass *CurMultiClass, Record *CurRec) {
assert(Lex.getCode() == tgtok::Dump && "Unknown tok");
Lex.Lex(); // eat the operation
- Init *Message = ParseValue(CurRec);
+ const Init *Message = ParseValue(CurRec);
if (!Message)
return true;
@@ -4485,7 +4497,7 @@ bool TGParser::ParseDump(MultiClass *CurMultiClass, Record *CurRec) {
HasReferenceResolver resolver{nullptr};
resolver.setFinal(true);
// force a resolution with a dummy resolver
- Init *ResolvedMessage = Message->resolveReferences(resolver);
+ const Init *ResolvedMessage = Message->resolveReferences(resolver);
addEntry(std::make_unique<Record::DumpInfo>(Loc, ResolvedMessage));
}
diff --git a/llvm/lib/TableGen/TGParser.h b/llvm/lib/TableGen/TGParser.h
index f33ae1c..a1f1db6 100644
--- a/llvm/lib/TableGen/TGParser.h
+++ b/llvm/lib/TableGen/TGParser.h
@@ -27,11 +27,11 @@ struct SubClassReference;
struct SubMultiClassReference;
struct LetRecord {
- StringInit *Name;
+ const StringInit *Name;
std::vector<unsigned> Bits;
- Init *Value;
+ const Init *Value;
SMLoc Loc;
- LetRecord(StringInit *N, ArrayRef<unsigned> B, Init *V, SMLoc L)
+ LetRecord(const StringInit *N, ArrayRef<unsigned> B, const Init *V, SMLoc L)
: Name(N), Bits(B), Value(V), Loc(L) {}
};
@@ -62,13 +62,13 @@ struct RecordsEntry {
/// constructed by desugaring an if statement.)
struct ForeachLoop {
SMLoc Loc;
- VarInit *IterVar;
- Init *ListValue;
+ const VarInit *IterVar;
+ const Init *ListValue;
std::vector<RecordsEntry> Entries;
void dump() const;
- ForeachLoop(SMLoc Loc, VarInit *IVar, Init *LValue)
+ ForeachLoop(SMLoc Loc, const VarInit *IVar, const Init *LValue)
: Loc(Loc), IterVar(IVar), ListValue(LValue) {}
};
@@ -96,7 +96,7 @@ private:
ScopeKind Kind;
std::unique_ptr<TGVarScope> Parent;
// A scope to hold variable definitions from defvar.
- std::map<std::string, Init *, std::less<>> Vars;
+ std::map<std::string, const Init *, std::less<>> Vars;
Record *CurRec = nullptr;
ForeachLoop *CurLoop = nullptr;
MultiClass *CurMultiClass = nullptr;
@@ -118,9 +118,9 @@ public:
return std::move(Parent);
}
- Init *getVar(RecordKeeper &Records, MultiClass *ParsingMultiClass,
- StringInit *Name, SMRange NameLoc,
- bool TrackReferenceLocs) const;
+ const Init *getVar(RecordKeeper &Records, MultiClass *ParsingMultiClass,
+ const StringInit *Name, SMRange NameLoc,
+ bool TrackReferenceLocs) const;
bool varAlreadyDefined(StringRef Name) const {
// When we check whether a variable is already defined, for the purpose of
@@ -130,7 +130,7 @@ public:
return Vars.find(Name) != Vars.end();
}
- void addVar(StringRef Name, Init *I) {
+ void addVar(StringRef Name, const Init *I) {
bool Ins = Vars.insert(std::make_pair(std::string(Name), I)).second;
(void)Ins;
assert(Ins && "Local variable already exists");
@@ -228,15 +228,15 @@ private: // Semantic analysis methods.
/// Set the value of a RecordVal within the given record. If `OverrideDefLoc`
/// is set, the provided location overrides any existing location of the
/// RecordVal.
- bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName,
- ArrayRef<unsigned> BitList, Init *V,
+ bool SetValue(Record *TheRec, SMLoc Loc, const Init *ValName,
+ ArrayRef<unsigned> BitList, const Init *V,
bool AllowSelfAssignment = false, bool OverrideDefLoc = true);
bool AddSubClass(Record *Rec, SubClassReference &SubClass);
bool AddSubClass(RecordsEntry &Entry, SubClassReference &SubClass);
bool AddSubMultiClass(MultiClass *CurMC,
SubMultiClassReference &SubMultiClass);
- using SubstStack = SmallVector<std::pair<Init *, Init *>, 8>;
+ using SubstStack = SmallVector<std::pair<const Init *, const Init *>, 8>;
bool addEntry(RecordsEntry E);
bool resolve(const ForeachLoop &Loop, SubstStack &Stack, bool Final,
@@ -246,15 +246,16 @@ private: // Semantic analysis methods.
SMLoc *Loc = nullptr);
bool addDefOne(std::unique_ptr<Record> Rec);
- using ArgValueHandler = std::function<void(Init *, Init *)>;
+ using ArgValueHandler = std::function<void(const Init *, const Init *)>;
bool resolveArguments(
- Record *Rec, ArrayRef<ArgumentInit *> ArgValues, SMLoc Loc,
- ArgValueHandler ArgValueHandler = [](Init *, Init *) {});
+ Record *Rec, ArrayRef<const ArgumentInit *> ArgValues, SMLoc Loc,
+ ArgValueHandler ArgValueHandler = [](const Init *, const Init *) {});
bool resolveArgumentsOfClass(MapResolver &R, Record *Rec,
- ArrayRef<ArgumentInit *> ArgValues, SMLoc Loc);
+ ArrayRef<const ArgumentInit *> ArgValues,
+ SMLoc Loc);
bool resolveArgumentsOfMultiClass(SubstStack &Substs, MultiClass *MC,
- ArrayRef<ArgumentInit *> ArgValues,
- Init *DefmName, SMLoc Loc);
+ ArrayRef<const ArgumentInit *> ArgValues,
+ const Init *DefmName, SMLoc Loc);
private: // Parser methods.
bool consume(tgtok::TokKind K);
@@ -280,45 +281,46 @@ private: // Parser methods.
bool ParseBodyItem(Record *CurRec);
bool ParseTemplateArgList(Record *CurRec);
- Init *ParseDeclaration(Record *CurRec, bool ParsingTemplateArgs);
- VarInit *ParseForeachDeclaration(Init *&ForeachListValue);
+ const Init *ParseDeclaration(Record *CurRec, bool ParsingTemplateArgs);
+ const VarInit *ParseForeachDeclaration(const Init *&ForeachListValue);
SubClassReference ParseSubClassReference(Record *CurRec, bool isDefm);
SubMultiClassReference ParseSubMultiClassReference(MultiClass *CurMC);
- Init *ParseIDValue(Record *CurRec, StringInit *Name, SMRange NameLoc,
- IDParseMode Mode = ParseValueMode);
- Init *ParseSimpleValue(Record *CurRec, const RecTy *ItemType = nullptr,
+ const Init *ParseIDValue(Record *CurRec, const StringInit *Name,
+ SMRange NameLoc, IDParseMode Mode = ParseValueMode);
+ const Init *ParseSimpleValue(Record *CurRec, const RecTy *ItemType = nullptr,
+ IDParseMode Mode = ParseValueMode);
+ const Init *ParseValue(Record *CurRec, const RecTy *ItemType = nullptr,
IDParseMode Mode = ParseValueMode);
- Init *ParseValue(Record *CurRec, const RecTy *ItemType = nullptr,
- IDParseMode Mode = ParseValueMode);
- void ParseValueList(SmallVectorImpl<llvm::Init *> &Result, Record *CurRec,
+ void ParseValueList(SmallVectorImpl<const Init *> &Result, Record *CurRec,
const RecTy *ItemType = nullptr);
- bool ParseTemplateArgValueList(SmallVectorImpl<llvm::ArgumentInit *> &Result,
+ bool ParseTemplateArgValueList(SmallVectorImpl<const ArgumentInit *> &Result,
Record *CurRec, Record *ArgsRec);
void ParseDagArgList(
- SmallVectorImpl<std::pair<llvm::Init*, StringInit*>> &Result,
+ SmallVectorImpl<std::pair<const Init *, const StringInit *>> &Result,
Record *CurRec);
bool ParseOptionalRangeList(SmallVectorImpl<unsigned> &Ranges);
bool ParseOptionalBitList(SmallVectorImpl<unsigned> &Ranges);
- TypedInit *ParseSliceElement(Record *CurRec);
- TypedInit *ParseSliceElements(Record *CurRec, bool Single = false);
+ const TypedInit *ParseSliceElement(Record *CurRec);
+ const TypedInit *ParseSliceElements(Record *CurRec, bool Single = false);
void ParseRangeList(SmallVectorImpl<unsigned> &Result);
bool ParseRangePiece(SmallVectorImpl<unsigned> &Ranges,
- TypedInit *FirstItem = nullptr);
+ const TypedInit *FirstItem = nullptr);
const RecTy *ParseType();
- Init *ParseOperation(Record *CurRec, const RecTy *ItemType);
- Init *ParseOperationSubstr(Record *CurRec, const RecTy *ItemType);
- Init *ParseOperationFind(Record *CurRec, const RecTy *ItemType);
- Init *ParseOperationForEachFilter(Record *CurRec, const RecTy *ItemType);
- Init *ParseOperationCond(Record *CurRec, const RecTy *ItemType);
+ const Init *ParseOperation(Record *CurRec, const RecTy *ItemType);
+ const Init *ParseOperationSubstr(Record *CurRec, const RecTy *ItemType);
+ const Init *ParseOperationFind(Record *CurRec, const RecTy *ItemType);
+ const Init *ParseOperationForEachFilter(Record *CurRec,
+ const RecTy *ItemType);
+ const Init *ParseOperationCond(Record *CurRec, const RecTy *ItemType);
const RecTy *ParseOperatorType();
- Init *ParseObjectName(MultiClass *CurMultiClass);
+ const Init *ParseObjectName(MultiClass *CurMultiClass);
Record *ParseClassID();
MultiClass *ParseMultiClassID();
bool ApplyLetStack(Record *CurRec);
bool ApplyLetStack(RecordsEntry &Entry);
- bool CheckTemplateArgValues(SmallVectorImpl<llvm::ArgumentInit *> &Values,
+ bool CheckTemplateArgValues(SmallVectorImpl<const ArgumentInit *> &Values,
SMLoc Loc, Record *ArgsRec);
};
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index ead6455..321190c 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -314,9 +314,9 @@ def AArch64PostLegalizerLowering
// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombiner
: GICombiner<"AArch64PostLegalizerCombinerImpl",
- [copy_prop, combines_for_extload,
- combine_indexed_load_store,
- sext_trunc_sextload, mutate_anyext_to_zext,
+ [copy_prop, cast_of_cast_combines, buildvector_of_truncate,
+ integer_of_truncate, mutate_anyext_to_zext,
+ combines_for_extload, combine_indexed_load_store, sext_trunc_sextload,
hoist_logic_op_with_same_opcode_hands,
redundant_and, xor_of_and_with_same_reg,
extractvecelt_pairwise_add, redundant_or,
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 1b8eac7..bbf2f26 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -480,9 +480,9 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
getSVEStackSize(MF) || LowerQRegCopyThroughMem);
}
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register.
-bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
+/// hasFPImpl - Return true if the specified function should have a dedicated
+/// frame pointer register.
+bool AArch64FrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index c197312..20445e6 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -65,7 +65,6 @@ public:
/// Can this function use the red zone for local allocations.
bool canUseRedZone(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
bool assignCalleeSavedSpillSlots(MachineFunction &MF,
@@ -125,6 +124,9 @@ public:
orderFrameObjects(const MachineFunction &MF,
SmallVectorImpl<int> &ObjectsToAllocate) const override;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
/// Returns true if a homogeneous prolog or epilog code can be emitted
/// for the size optimization. If so, HOM_Prolog/HOM_Epilog pseudo
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b565758..7448416 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -25,6 +25,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallVectorExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@@ -2111,7 +2112,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
setOperationAction(ISD::BITCAST, VT, PreferNEON ? Legal : Default);
setOperationAction(ISD::BITREVERSE, VT, Default);
setOperationAction(ISD::BSWAP, VT, Default);
- setOperationAction(ISD::BUILD_VECTOR, VT, Default);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::CONCAT_VECTORS, VT, Default);
setOperationAction(ISD::CTLZ, VT, Default);
setOperationAction(ISD::CTPOP, VT, Default);
@@ -14395,24 +14396,72 @@ static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG,
return SDValue();
}
-SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerFixedLengthBuildVectorToSVE(
+ SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ auto *BVN = cast<BuildVectorSDNode>(Op);
- if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable())) {
- if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
- SDLoc DL(Op);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
- SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
- SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
- return convertFromScalableVector(DAG, Op.getValueType(), Seq);
- }
+ if (auto SeqInfo = BVN->isConstantSequence()) {
+ SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
+ SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
+ SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
+ return convertFromScalableVector(DAG, VT, Seq);
+ }
+
+ unsigned NumElems = VT.getVectorNumElements();
+ if (!VT.isPow2VectorType() || VT.getFixedSizeInBits() > 128 ||
+ NumElems <= 1 || BVN->isConstant())
+ return SDValue();
+
+ auto IsExtractElt = [](SDValue Op) {
+ return Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT;
+ };
- // Revert to common legalisation for all other variants.
+ // For integer types that are not already in vectors limit to at most four
+ // elements. This is an arbitrary restriction to avoid many fmovs from GPRs.
+ if (VT.getScalarType().isInteger() &&
+ NumElems - count_if(Op->op_values(), IsExtractElt) > 4)
return SDValue();
+
+ // Lower (pow2) BUILD_VECTORS that are <= 128-bit to a sequence of ZIP1s.
+ SDValue ZeroI64 = DAG.getConstant(0, DL, MVT::i64);
+ SmallVector<SDValue, 16> Intermediates = map_to_vector<16>(
+ Op->op_values(), [&, Undef = DAG.getUNDEF(ContainerVT)](SDValue Op) {
+ return Op.isUndef() ? Undef
+ : DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
+ ContainerVT, Undef, Op, ZeroI64);
+ });
+
+ ElementCount ZipEC = ContainerVT.getVectorElementCount();
+ while (Intermediates.size() > 1) {
+ EVT ZipVT = getPackedSVEVectorVT(ZipEC);
+
+ for (unsigned I = 0; I < Intermediates.size(); I += 2) {
+ SDValue Op0 = DAG.getBitcast(ZipVT, Intermediates[I + 0]);
+ SDValue Op1 = DAG.getBitcast(ZipVT, Intermediates[I + 1]);
+ Intermediates[I / 2] =
+ Op1.isUndef() ? Op0
+ : DAG.getNode(AArch64ISD::ZIP1, DL, ZipVT, Op0, Op1);
+ }
+
+ Intermediates.resize(Intermediates.size() / 2);
+ ZipEC = ZipEC.divideCoefficientBy(2);
}
+ assert(Intermediates.size() == 1);
+ SDValue Vec = DAG.getBitcast(ContainerVT, Intermediates[0]);
+ return convertFromScalableVector(DAG, VT, Vec);
+}
+
+SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
+ return LowerFixedLengthBuildVectorToSVE(Op, DAG);
+
// Try to build a simple constant vector.
Op = NormalizeBuildVector(Op, DAG);
// Thought this might return a non-BUILD_VECTOR (e.g. CONCAT_VECTORS), if so,
@@ -20711,7 +20760,7 @@ static SDValue performSubAddMULCombine(SDNode *N, SelectionDAG &DAG) {
if (!Add.hasOneUse())
return SDValue();
- if (DAG.isConstantIntBuildVectorOrConstantInt(peekThroughBitcasts(X)))
+ if (DAG.isConstantIntBuildVectorOrConstantInt(X))
return SDValue();
SDValue M1 = Add.getOperand(0);
@@ -26982,9 +27031,9 @@ void AArch64TargetLowering::ReplaceNodeResults(
}
}
-bool AArch64TargetLowering::useLoadStackGuardNode() const {
+bool AArch64TargetLowering::useLoadStackGuardNode(const Module &M) const {
if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
- return TargetLowering::useLoadStackGuardNode();
+ return TargetLowering::useLoadStackGuardNode(M);
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index cf2ae5f..160cd18 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -811,7 +811,7 @@ public:
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(MVT VT) const override;
@@ -1244,6 +1244,7 @@ private:
SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
SelectionDAG &DAG) const;
+ SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const override;
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 7c6b789..ff3c69f 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -337,10 +337,8 @@ AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call,
bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
TargetTransformInfo::RegisterKind K) const {
assert(K != TargetTransformInfo::RGK_Scalar);
- return ((K == TargetTransformInfo::RGK_FixedWidthVector &&
- ST->isNeonAvailable()) ||
- (K == TargetTransformInfo::RGK_ScalableVector &&
- ST->isSVEorStreamingSVEAvailable()));
+ return (K == TargetTransformInfo::RGK_FixedWidthVector &&
+ ST->isNeonAvailable());
}
/// Calculate the cost of materializing a 64-bit value. This helper
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index ee5e759..e4ca1ae 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -580,7 +580,7 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
auto *ActiveBits = B.CreatePHI(WaveTy, 2, "ActiveBits");
ActiveBits->addIncoming(Ballot, EntryBB);
- // Use llvm.cttz instrinsic to find the lowest remaining active lane.
+ // Use llvm.cttz intrinsic to find the lowest remaining active lane.
auto *FF1 =
B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});
diff --git a/llvm/lib/Target/AMDGPU/R600FrameLowering.h b/llvm/lib/Target/AMDGPU/R600FrameLowering.h
index f171bc4..c462117 100644
--- a/llvm/lib/Target/AMDGPU/R600FrameLowering.h
+++ b/llvm/lib/Target/AMDGPU/R600FrameLowering.h
@@ -27,9 +27,8 @@ public:
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
Register &FrameReg) const override;
- bool hasFP(const MachineFunction &MF) const override {
- return false;
- }
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override { return false; }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index bc162b09..13a2db7 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1805,7 +1805,7 @@ static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
// The FP for kernels is always known 0, so we never really need to setup an
// explicit register for it. However, DisableFramePointerElim will force us to
// use a register for it.
-bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
+bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
// For entry & chain functions we can use an immediate offset in most cases,
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.h b/llvm/lib/Target/AMDGPU/SIFrameLowering.h
index b3feb75..938c750 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.h
@@ -66,6 +66,9 @@ public:
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
void emitEntryFunctionFlatScratchInit(MachineFunction &MF,
MachineBasicBlock &MBB,
@@ -82,8 +85,6 @@ private:
Register ScratchWaveOffsetReg) const;
public:
- bool hasFP(const MachineFunction &MF) const override;
-
bool requiresStackPointerReference(const MachineFunction &MF) const;
};
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 5b74022..722a79b 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -41,9 +41,9 @@
/// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0 // Restore the exec mask for the Then
/// // block
/// %exec = S_XOR_B64 %sgpr0, %exec // Update the exec mask
-/// S_BRANCH_EXECZ label1 // Use our branch optimization
+/// S_CBRANCH_EXECZ label1 // Use our branch optimization
/// // instruction again.
-/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr // Do the THEN block
+/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr // Do the ELSE block
/// label1:
/// %exec = S_OR_B64 %exec, %sgpr0 // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARC/ARCFrameLowering.cpp b/llvm/lib/Target/ARC/ARCFrameLowering.cpp
index 1227fae..472f1c1 100644
--- a/llvm/lib/Target/ARC/ARCFrameLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCFrameLowering.cpp
@@ -487,7 +487,7 @@ MachineBasicBlock::iterator ARCFrameLowering::eliminateCallFramePseudoInstr(
return MBB.erase(I);
}
-bool ARCFrameLowering::hasFP(const MachineFunction &MF) const {
+bool ARCFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
bool HasFP = MF.getTarget().Options.DisableFramePointerElim(MF) ||
MF.getFrameInfo().hasVarSizedObjects() ||
diff --git a/llvm/lib/Target/ARC/ARCFrameLowering.h b/llvm/lib/Target/ARC/ARCFrameLowering.h
index 9951a09..089326f 100644
--- a/llvm/lib/Target/ARC/ARCFrameLowering.h
+++ b/llvm/lib/Target/ARC/ARCFrameLowering.h
@@ -54,8 +54,6 @@ public:
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
- bool hasFP(const MachineFunction &MF) const override;
-
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
@@ -64,6 +62,9 @@ public:
llvm::MachineFunction &, const llvm::TargetRegisterInfo *,
std::vector<llvm::CalleeSavedInfo> &) const override;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
void adjustStackToMatchRecords(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index aad305c..3f28ce8 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -116,12 +116,9 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_iOS_SaveList;
if (PushPopSplit == ARMSubtarget::SplitR7)
- return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_R7_SaveList
+ return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_SaveList
: CSR_ATPCS_SplitPush_SaveList;
- if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
- return CSR_AAPCS_SplitPush_R11_SaveList;
-
return CSR_AAPCS_SaveList;
}
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.td b/llvm/lib/Target/ARM/ARMCallingConv.td
index 27f175a..d14424c 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -301,17 +301,14 @@ def CSR_ATPCS_SplitPush_SwiftError : CalleeSavedRegs<(sub CSR_ATPCS_SplitPush,
def CSR_ATPCS_SplitPush_SwiftTail : CalleeSavedRegs<(sub CSR_ATPCS_SplitPush,
R10)>;
-// Sometimes we need to split the push of the callee-saved GPRs into two
-// regions, to ensure that the frame chain record is set up correctly. These
-// list the callee-saved registers in the order they end up on the stack, which
-// depends on whether the frame pointer is r7 or r11.
-def CSR_AAPCS_SplitPush_R11 : CalleeSavedRegs<(add R10, R9, R8, R7, R6, R5, R4,
- LR, R11,
- (sequence "D%u", 15, 8))>;
-def CSR_AAPCS_SplitPush_R7 : CalleeSavedRegs<(add LR, R11,
- R7, R6, R5, R4,
- R10, R9, R8,
- (sequence "D%u", 15, 8))>;
+// When enforcing an AAPCS compliant frame chain, R11 is used as the frame
+// pointer even for Thumb targets, where split pushes are necessary.
+// This AAPCS alternative makes sure the frame index slots match the push
+// order in that case.
+def CSR_AAPCS_SplitPush : CalleeSavedRegs<(add LR, R11,
+ R7, R6, R5, R4,
+ R10, R9, R8,
+ (sequence "D%u", 15, 8))>;
// Constructors and destructors return 'this' in the ARM C++ ABI; since 'this'
// and the pointer return value are both passed in R0 in these cases, this can
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 2706efa..e070345 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -199,11 +199,6 @@ SpillArea getSpillArea(Register Reg,
// push {r0-r10, r12} GPRCS1
// vpush {r8-d15} DPRCS1
// push {r11, lr} GPRCS2
- //
- // SplitR11AAPCSSignRA:
- // push {r0-r10, r12} GPRSC1
- // push {r11, lr} GPRCS2
- // vpush {r8-d15} DPRCS1
// If FPCXTNS is spilled (for CMSE secure entryfunctions), it is always at
// the top of the stack frame.
@@ -251,8 +246,7 @@ SpillArea getSpillArea(Register Reg,
return SpillArea::GPRCS1;
case ARM::LR:
- if (Variation == ARMSubtarget::SplitR11WindowsSEH ||
- Variation == ARMSubtarget::SplitR11AAPCSSignRA)
+ if (Variation == ARMSubtarget::SplitR11WindowsSEH)
return SpillArea::GPRCS2;
else
return SpillArea::GPRCS1;
@@ -323,10 +317,10 @@ bool ARMFrameLowering::enableCalleeSaveSkip(const MachineFunction &MF) const {
return true;
}
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register. This is true if the function has variable sized allocas
-/// or if frame pointer elimination is disabled.
-bool ARMFrameLowering::hasFP(const MachineFunction &MF) const {
+/// hasFPImpl - Return true if the specified function should have a dedicated
+/// frame pointer register. This is true if the function has variable sized
+/// allocas or if frame pointer elimination is disabled.
+bool ARMFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -869,9 +863,6 @@ static int getMaxFPOffset(const ARMSubtarget &STI, const ARMFunctionInfo &AFI,
// This is a conservative estimation: Assume the frame pointer being r7 and
// pc("r15") up to r8 getting spilled before (= 8 registers).
int MaxRegBytes = 8 * 4;
- if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
- // Here, r11 can be stored below all of r4-r15.
- MaxRegBytes = 11 * 4;
if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
// Here, r11 can be stored below all of r4-r15 plus d8-d15.
MaxRegBytes = 11 * 4 + 8 * 8;
@@ -944,23 +935,17 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
}
// Determine spill area sizes, and some important frame indices.
- SpillArea FramePtrSpillArea;
- bool BeforeFPPush = true;
for (const CalleeSavedInfo &I : CSI) {
Register Reg = I.getReg();
int FI = I.getFrameIdx();
- SpillArea Area = getSpillArea(Reg, PushPopSplit,
- AFI->getNumAlignedDPRCS2Regs(), RegInfo);
-
- if (Reg == FramePtr) {
+ if (Reg == FramePtr)
FramePtrSpillFI = FI;
- FramePtrSpillArea = Area;
- }
if (Reg == ARM::D8)
D8SpillFI = FI;
- switch (Area) {
+ switch (getSpillArea(Reg, PushPopSplit, AFI->getNumAlignedDPRCS2Regs(),
+ RegInfo)) {
case SpillArea::FPCXT:
FPCXTSaveSize += 4;
break;
@@ -987,7 +972,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Move past FPCXT area.
if (FPCXTSaveSize > 0) {
LastPush = MBBI++;
- DefCFAOffsetCandidates.addInst(LastPush, FPCXTSaveSize, BeforeFPPush);
+ DefCFAOffsetCandidates.addInst(LastPush, FPCXTSaveSize, true);
}
// Allocate the vararg register save area.
@@ -995,15 +980,13 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -ArgRegsSaveSize,
MachineInstr::FrameSetup);
LastPush = std::prev(MBBI);
- DefCFAOffsetCandidates.addInst(LastPush, ArgRegsSaveSize, BeforeFPPush);
+ DefCFAOffsetCandidates.addInst(LastPush, ArgRegsSaveSize, true);
}
// Move past area 1.
if (GPRCS1Size > 0) {
GPRCS1Push = LastPush = MBBI++;
- DefCFAOffsetCandidates.addInst(LastPush, GPRCS1Size, BeforeFPPush);
- if (FramePtrSpillArea == SpillArea::GPRCS1)
- BeforeFPPush = false;
+ DefCFAOffsetCandidates.addInst(LastPush, GPRCS1Size, true);
}
// Determine starting offsets of spill areas. These offsets are all positive
@@ -1027,6 +1010,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
} else {
DPRCSOffset = GPRCS2Offset - DPRGapSize - DPRCSSize;
}
+ int FramePtrOffsetInPush = 0;
if (HasFP) {
// Offset from the CFA to the saved frame pointer, will be negative.
[[maybe_unused]] int FPOffset = MFI.getObjectOffset(FramePtrSpillFI);
@@ -1034,6 +1018,13 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
<< ", FPOffset: " << FPOffset << "\n");
assert(getMaxFPOffset(STI, *AFI, MF) <= FPOffset &&
"Max FP estimation is wrong");
+ // Offset from the top of the GPRCS1 area to the saved frame pointer, will
+ // be negative.
+ FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize + FPCXTSaveSize;
+ LLVM_DEBUG(dbgs() << "FramePtrOffsetInPush=" << FramePtrOffsetInPush
+ << ", FramePtrSpillOffset="
+ << (MFI.getObjectOffset(FramePtrSpillFI) + NumBytes)
+ << "\n");
AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
NumBytes);
}
@@ -1045,9 +1036,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// after DPRCS1.
if (GPRCS2Size > 0 && PushPopSplit != ARMSubtarget::SplitR11WindowsSEH) {
GPRCS2Push = LastPush = MBBI++;
- DefCFAOffsetCandidates.addInst(LastPush, GPRCS2Size, BeforeFPPush);
- if (FramePtrSpillArea == SpillArea::GPRCS2)
- BeforeFPPush = false;
+ DefCFAOffsetCandidates.addInst(LastPush, GPRCS2Size);
}
// Prolog/epilog inserter assumes we correctly align DPRs on the stack, so our
@@ -1060,7 +1049,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
else {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
MachineInstr::FrameSetup);
- DefCFAOffsetCandidates.addInst(std::prev(MBBI), DPRGapSize, BeforeFPPush);
+ DefCFAOffsetCandidates.addInst(std::prev(MBBI), DPRGapSize);
}
}
@@ -1069,8 +1058,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
- DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI),
- BeforeFPPush);
+ DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
LastPush = MBBI++;
}
}
@@ -1089,9 +1077,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Move GPRCS2, if using using SplitR11WindowsSEH.
if (GPRCS2Size > 0 && PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
GPRCS2Push = LastPush = MBBI++;
- DefCFAOffsetCandidates.addInst(LastPush, GPRCS2Size, BeforeFPPush);
- if (FramePtrSpillArea == SpillArea::GPRCS2)
- BeforeFPPush = false;
+ DefCFAOffsetCandidates.addInst(LastPush, GPRCS2Size);
}
bool NeedsWinCFIStackAlloc = NeedsWinCFI;
@@ -1192,51 +1178,28 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// into spill area 1, including the FP in R11. In either case, it
// is in area one and the adjustment needs to take place just after
// that push.
+ // FIXME: The above is not necessary true when PACBTI is enabled.
+ // AAPCS requires use of R11, and PACBTI gets in the way of regular pushes,
+ // so FP ends up on area two.
MachineBasicBlock::iterator AfterPush;
if (HasFP) {
- MachineBasicBlock::iterator FPPushInst;
- // Offset from SP immediately after the push which saved the FP to the FP
- // save slot.
- int64_t FPOffsetAfterPush;
- switch (FramePtrSpillArea) {
- case SpillArea::GPRCS1:
- FPPushInst = GPRCS1Push;
- FPOffsetAfterPush = MFI.getObjectOffset(FramePtrSpillFI) +
- ArgRegsSaveSize + FPCXTSaveSize +
- sizeOfSPAdjustment(*FPPushInst);
- LLVM_DEBUG(dbgs() << "Frame pointer in GPRCS1, offset "
- << FPOffsetAfterPush << " after that push\n");
- break;
- case SpillArea::GPRCS2:
- FPPushInst = GPRCS2Push;
- FPOffsetAfterPush = MFI.getObjectOffset(FramePtrSpillFI) +
- ArgRegsSaveSize + FPCXTSaveSize + GPRCS1Size +
- sizeOfSPAdjustment(*FPPushInst);
- if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH)
- FPOffsetAfterPush += DPRCSSize + DPRGapSize;
- LLVM_DEBUG(dbgs() << "Frame pointer in GPRCS2, offset "
- << FPOffsetAfterPush << " after that push\n");
- break;
- default:
- llvm_unreachable("frame pointer in unknown spill area");
- break;
+ AfterPush = std::next(GPRCS1Push);
+ unsigned PushSize = sizeOfSPAdjustment(*GPRCS1Push);
+ int FPOffset = PushSize + FramePtrOffsetInPush;
+ if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
+ AfterPush = std::next(GPRCS2Push);
+ emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush, dl, TII,
+ FramePtr, ARM::SP, 0, MachineInstr::FrameSetup);
+ } else {
+ emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush, dl, TII,
+ FramePtr, ARM::SP, FPOffset,
+ MachineInstr::FrameSetup);
}
- AfterPush = std::next(FPPushInst);
- if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH)
- assert(FPOffsetAfterPush == 0);
-
- // Emit the MOV or ADD to set up the frame pointer register.
- emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush, dl, TII,
- FramePtr, ARM::SP, FPOffsetAfterPush,
- MachineInstr::FrameSetup);
-
if (!NeedsWinCFI) {
- // Emit DWARF info to find the CFA using the frame pointer from this
- // point onward.
- if (FPOffsetAfterPush != 0) {
+ if (FramePtrOffsetInPush + PushSize != 0) {
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
nullptr, MRI->getDwarfRegNum(FramePtr, true),
- -MFI.getObjectOffset(FramePtrSpillFI)));
+ FPCXTSaveSize + ArgRegsSaveSize - FramePtrOffsetInPush));
BuildMI(MBB, AfterPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
@@ -1749,8 +1712,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
if (Reg == ARM::LR && !isTailCall && !isVarArg && !isInterrupt &&
!isCmseEntry && !isTrap && AFI->getArgumentStackToRestore() == 0 &&
STI.hasV5TOps() && MBB.succ_empty() && !hasPAC &&
- (PushPopSplit != ARMSubtarget::SplitR11WindowsSEH &&
- PushPopSplit != ARMSubtarget::SplitR11AAPCSSignRA)) {
+ PushPopSplit != ARMSubtarget::SplitR11WindowsSEH) {
Reg = ARM::PC;
// Fold the return instruction into the LDM.
DeleteRet = true;
@@ -2983,29 +2945,18 @@ bool ARMFrameLowering::assignCalleeSavedSpillSlots(
const auto &AFI = *MF.getInfo<ARMFunctionInfo>();
if (AFI.shouldSignReturnAddress()) {
// The order of register must match the order we push them, because the
- // PEI assigns frame indices in that order. That order depends on the
- // PushPopSplitVariation, there are only two cases which we use with return
- // address signing:
- switch (STI.getPushPopSplitVariation(MF)) {
- case ARMSubtarget::SplitR7:
- // LR, R7, R6, R5, R4, <R12>, R11, R10, R9, R8, D15-D8
- CSI.insert(find_if(CSI,
- [=](const auto &CS) {
- Register Reg = CS.getReg();
- return Reg == ARM::R10 || Reg == ARM::R11 ||
- Reg == ARM::R8 || Reg == ARM::R9 ||
- ARM::DPRRegClass.contains(Reg);
- }),
- CalleeSavedInfo(ARM::R12));
- break;
- case ARMSubtarget::SplitR11AAPCSSignRA:
- // With SplitR11AAPCSSignRA, R12 will always be the highest-addressed CSR
- // on the stack.
- CSI.insert(CSI.begin(), CalleeSavedInfo(ARM::R12));
- break;
- default:
- llvm_unreachable("Unexpected CSR split with return address signing");
- }
+ // PEI assigns frame indices in that order. When compiling for return
+ // address sign and authenication, we use split push, therefore the orders
+ // we want are:
+ // LR, R7, R6, R5, R4, <R12>, R11, R10, R9, R8, D15-D8
+ CSI.insert(find_if(CSI,
+ [=](const auto &CS) {
+ Register Reg = CS.getReg();
+ return Reg == ARM::R10 || Reg == ARM::R11 ||
+ Reg == ARM::R8 || Reg == ARM::R9 ||
+ ARM::DPRRegClass.contains(Reg);
+ }),
+ CalleeSavedInfo(ARM::R12));
}
return false;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.h b/llvm/lib/Target/ARM/ARMFrameLowering.h
index 3c5bc00..ff51f1a 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.h
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.h
@@ -45,7 +45,6 @@ public:
bool enableCalleeSaveSkip(const MachineFunction &MF) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool isFPReserved(const MachineFunction &MF) const;
bool requiresAAPCSFrameRecord(const MachineFunction &MF) const;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
@@ -87,6 +86,9 @@ public:
const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
void emitPushInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
ArrayRef<CalleeSavedInfo> CSI, unsigned StmOpc,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 64c0500..5d679a1 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21305,7 +21305,7 @@ bool ARMTargetLowering::shouldInsertFencesForAtomic(
return InsertFencesForAtomic;
}
-bool ARMTargetLowering::useLoadStackGuardNode() const {
+bool ARMTargetLowering::useLoadStackGuardNode(const Module &M) const {
// ROPI/RWPI are not supported currently.
return !Subtarget->isROPI() && !Subtarget->isRWPI();
}
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 316f7d3..ef651bc3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -675,7 +675,7 @@ class VectorType;
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDagStackGuard(const Module &M) const override;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 9adfb1f..c4a782b 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -514,12 +514,5 @@ ARMSubtarget::getPushPopSplitVariation(const MachineFunction &MF) const {
F.needsUnwindTableEntry() &&
(MFI.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF)))
return SplitR11WindowsSEH;
-
- // Returns R11SplitAAPCSBranchSigning if R11 and lr are not adjacent to each
- // other in the list of callee saved registers in a frame, and branch
- // signing is enabled.
- if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress() &&
- getFramePointerReg() == ARM::R11)
- return SplitR11AAPCSSignRA;
return NoSplit;
}
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 214c5f1..7917ddc1 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -105,18 +105,6 @@ public:
/// vpush {d8-d15}
/// push {r11, lr}
SplitR11WindowsSEH,
-
- /// When generating AAPCS-compilant frame chains, R11 is the frame pointer,
- /// and must be pushed adjacent to the return address (LR). Normally this
- /// isn't a problem, because the only register between them is r12, which is
- /// the intra-procedure-call scratch register, so doesn't need to be saved.
- /// However, when PACBTI is in use, r12 contains the authentication code, so
- /// does need to be saved. This means that we need a separate push for R11
- /// and LR.
- /// push {r0-r10, r12}
- /// push {r11, lr}
- /// vpush {d8-d15}
- SplitR11AAPCSSignRA,
};
protected:
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 54eb011..906519f 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -2550,7 +2550,7 @@ public:
addVPTPredNOperands(Inst, N-1);
MCRegister RegNum;
if (getVPTPred() == ARMVCC::None) {
- RegNum = MCRegister();
+ RegNum = ARM::NoRegister;
} else {
unsigned NextOpIndex = Inst.getNumOperands();
auto &MCID = Parser->getInstrDesc(Inst.getOpcode());
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 814b71d..38280ad 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -993,7 +993,7 @@ ARMDisassembler::AddThumbPredicate(MCInst &MI) const {
CCI = MI.insert(CCI, MCOperand::createImm(CC));
++CCI;
if (CC == ARMCC::AL)
- MI.insert(CCI, MCOperand::createReg(0));
+ MI.insert(CCI, MCOperand::createReg(ARM::NoRegister));
else
MI.insert(CCI, MCOperand::createReg(ARM::CPSR));
} else if (CC != ARMCC::AL) {
@@ -1060,7 +1060,7 @@ void ARMDisassembler::UpdateThumbVFPPredicate(
I->setImm(CC);
++I;
if (CC == ARMCC::AL)
- I->setReg(0);
+ I->setReg(ARM::NoRegister);
else
I->setReg(ARM::CPSR);
return;
@@ -1648,7 +1648,7 @@ static DecodeStatus DecodePredicateOperand(MCInst &Inst, unsigned Val,
Check(S, MCDisassembler::SoftFail);
Inst.addOperand(MCOperand::createImm(Val));
if (Val == ARMCC::AL) {
- Inst.addOperand(MCOperand::createReg(0));
+ Inst.addOperand(MCOperand::createReg(ARM::NoRegister));
} else
Inst.addOperand(MCOperand::createReg(ARM::CPSR));
return S;
@@ -1660,7 +1660,7 @@ static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
if (Val)
Inst.addOperand(MCOperand::createReg(ARM::CPSR));
else
- Inst.addOperand(MCOperand::createReg(0));
+ Inst.addOperand(MCOperand::createReg(ARM::NoRegister));
return MCDisassembler::Success;
}
diff --git a/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
index 64dd033..91b0f8c 100644
--- a/llvm/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
@@ -232,7 +232,7 @@ void AVRFrameLowering::emitEpilogue(MachineFunction &MF,
//
// Notice that strictly this is not a frame pointer because it contains SP after
// frame allocation instead of having the original SP in function entry.
-bool AVRFrameLowering::hasFP(const MachineFunction &MF) const {
+bool AVRFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const AVRMachineFunctionInfo *FuncInfo = MF.getInfo<AVRMachineFunctionInfo>();
return (FuncInfo->getHasSpills() || FuncInfo->getHasAllocas() ||
diff --git a/llvm/lib/Target/AVR/AVRFrameLowering.h b/llvm/lib/Target/AVR/AVRFrameLowering.h
index a550c0e..7baa5e9 100644
--- a/llvm/lib/Target/AVR/AVRFrameLowering.h
+++ b/llvm/lib/Target/AVR/AVRFrameLowering.h
@@ -21,7 +21,6 @@ public:
public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
ArrayRef<CalleeSavedInfo> CSI,
@@ -38,6 +37,9 @@ public:
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/BPF/BPFFrameLowering.cpp b/llvm/lib/Target/BPF/BPFFrameLowering.cpp
index 8812cfd..123b99f 100644
--- a/llvm/lib/Target/BPF/BPFFrameLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFFrameLowering.cpp
@@ -20,7 +20,9 @@
using namespace llvm;
-bool BPFFrameLowering::hasFP(const MachineFunction &MF) const { return true; }
+bool BPFFrameLowering::hasFPImpl(const MachineFunction &MF) const {
+ return true;
+}
void BPFFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {}
diff --git a/llvm/lib/Target/BPF/BPFFrameLowering.h b/llvm/lib/Target/BPF/BPFFrameLowering.h
index a546351..6beffcb 100644
--- a/llvm/lib/Target/BPF/BPFFrameLowering.h
+++ b/llvm/lib/Target/BPF/BPFFrameLowering.h
@@ -26,7 +26,6 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS) const override;
@@ -35,6 +34,9 @@ public:
MachineBasicBlock::iterator MI) const override {
return MBB.erase(MI);
}
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
}
#endif
diff --git a/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp b/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
index cedcbff..c023b5a 100644
--- a/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
+++ b/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
@@ -33,7 +33,7 @@ static Register getFPReg(const CSKYSubtarget &STI) { return CSKY::R8; }
// callee saved register to save the value.
static Register getBPReg(const CSKYSubtarget &STI) { return CSKY::R7; }
-bool CSKYFrameLowering::hasFP(const MachineFunction &MF) const {
+bool CSKYFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/CSKY/CSKYFrameLowering.h b/llvm/lib/Target/CSKY/CSKYFrameLowering.h
index 69bf01c..0b3b287 100644
--- a/llvm/lib/Target/CSKY/CSKYFrameLowering.h
+++ b/llvm/lib/Target/CSKY/CSKYFrameLowering.h
@@ -61,7 +61,6 @@ public:
MutableArrayRef<CalleeSavedInfo> CSI,
const TargetRegisterInfo *TRI) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasBP(const MachineFunction &MF) const;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
@@ -69,6 +68,9 @@ public:
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // namespace llvm
#endif
diff --git a/llvm/lib/Target/DirectX/DirectXFrameLowering.h b/llvm/lib/Target/DirectX/DirectXFrameLowering.h
index 76a1450..8582355 100644
--- a/llvm/lib/Target/DirectX/DirectXFrameLowering.h
+++ b/llvm/lib/Target/DirectX/DirectXFrameLowering.h
@@ -29,7 +29,8 @@ public:
void emitPrologue(MachineFunction &, MachineBasicBlock &) const override {}
void emitEpilogue(MachineFunction &, MachineBasicBlock &) const override {}
- bool hasFP(const MachineFunction &) const override { return false; }
+protected:
+ bool hasFPImpl(const MachineFunction &) const override { return false; }
};
} // namespace llvm
#endif // LLVM_DIRECTX_DIRECTXFRAMELOWERING_H
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 7c82f5e..48acd9d 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1144,10 +1144,7 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
}
}
-bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
- if (MF.getFunction().hasFnAttribute(Attribute::Naked))
- return false;
-
+bool HexagonFrameLowering::hasFPImpl(const MachineFunction &MF) const {
auto &MFI = MF.getFrameInfo();
auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
bool HasExtraAlign = HRI.hasStackRealignment(MF);
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.h b/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
index 98e69dc..926aadb0 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
@@ -89,7 +89,6 @@ public:
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
Register &FrameReg) const override;
- bool hasFP(const MachineFunction &MF) const override;
const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries)
const override {
@@ -114,6 +113,9 @@ public:
void insertCFIInstructions(MachineFunction &MF) const;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
using CSIVect = std::vector<CalleeSavedInfo>;
diff --git a/llvm/lib/Target/Lanai/LanaiFrameLowering.h b/llvm/lib/Target/Lanai/LanaiFrameLowering.h
index 380d63d..9bd78d0 100644
--- a/llvm/lib/Target/Lanai/LanaiFrameLowering.h
+++ b/llvm/lib/Target/Lanai/LanaiFrameLowering.h
@@ -44,10 +44,11 @@ public:
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
- bool hasFP(const MachineFunction & /*MF*/) const override { return true; }
-
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS = nullptr) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction & /*MF*/) const override { return true; }
};
} // namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
index 4e50472..1a787c6 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
@@ -31,7 +31,7 @@ using namespace llvm;
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
// variable sized allocas, or if the frame address is taken.
-bool LoongArchFrameLowering::hasFP(const MachineFunction &MF) const {
+bool LoongArchFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
index bc2ac02..6cbfcf6 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
@@ -49,13 +49,15 @@ public:
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
Register &FrameReg) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasBP(const MachineFunction &MF) const;
uint64_t getFirstSPAdjustAmount(const MachineFunction &MF) const;
bool enableShrinkWrapping(const MachineFunction &MF) const override;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
void determineFrameLayout(MachineFunction &MF) const;
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
diff --git a/llvm/lib/Target/M68k/M68kFrameLowering.cpp b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
index 1445bac..4245061 100644
--- a/llvm/lib/Target/M68k/M68kFrameLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
@@ -40,7 +40,7 @@ M68kFrameLowering::M68kFrameLowering(const M68kSubtarget &STI, Align Alignment)
StackPtr = TRI->getStackRegister();
}
-bool M68kFrameLowering::hasFP(const MachineFunction &MF) const {
+bool M68kFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
diff --git a/llvm/lib/Target/M68k/M68kFrameLowering.h b/llvm/lib/Target/M68k/M68kFrameLowering.h
index a534937..ed2bfb6 100644
--- a/llvm/lib/Target/M68k/M68kFrameLowering.h
+++ b/llvm/lib/Target/M68k/M68kFrameLowering.h
@@ -121,12 +121,6 @@ public:
MutableArrayRef<CalleeSavedInfo> CSI,
const TargetRegisterInfo *TRI) const override;
- /// Return true if the specified function should have a dedicated frame
- /// pointer register. This is true if the function has variable sized
- /// allocas, if it needs dynamic stack realignment, if frame pointer
- /// elimination is disabled, or if the frame address is taken.
- bool hasFP(const MachineFunction &MF) const override;
-
/// Under normal circumstances, when a frame pointer is not required, we
/// reserve argument space for call sites in the function immediately on
/// entry to the current function. This eliminates the need for add/sub sp
@@ -166,6 +160,13 @@ public:
/// pointer by a constant value.
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
int64_t NumBytes, bool InEpilogue) const;
+
+protected:
+ /// Return true if the specified function should have a dedicated frame
+ /// pointer register. This is true if the function has variable sized
+ /// allocas, if it needs dynamic stack realignment, if frame pointer
+ /// elimination is disabled, or if the frame address is taken.
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // namespace llvm
diff --git a/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
index dc89fec..f496085 100644
--- a/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
+++ b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
@@ -772,6 +772,20 @@ static bool isAddressBase(const SDValue &N) {
}
}
+static bool AllowARIIWithZeroDisp(SDNode *Parent) {
+ if (!Parent)
+ return false;
+ switch (Parent->getOpcode()) {
+ case ISD::LOAD:
+ case ISD::STORE:
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_STORE:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool M68kDAGToDAGISel::SelectARII(SDNode *Parent, SDValue N, SDValue &Disp,
SDValue &Base, SDValue &Index) {
M68kISelAddressMode AM(M68kISelAddressMode::AddrType::ARII);
@@ -811,8 +825,7 @@ bool M68kDAGToDAGISel::SelectARII(SDNode *Parent, SDValue N, SDValue &Disp,
// The idea here is that we want to use AddrType::ARII without displacement
// only if necessary like memory operations, otherwise this must be lowered
// into addition
- if (AM.Disp == 0 && (!Parent || (Parent->getOpcode() != ISD::LOAD &&
- Parent->getOpcode() != ISD::STORE))) {
+ if (AM.Disp == 0 && !AllowARIIWithZeroDisp(Parent)) {
LLVM_DEBUG(dbgs() << "REJECT: Displacement is Zero\n");
return false;
}
diff --git a/llvm/lib/Target/M68k/M68kInstrAtomics.td b/llvm/lib/Target/M68k/M68kInstrAtomics.td
index 84a6625..9203a3e 100644
--- a/llvm/lib/Target/M68k/M68kInstrAtomics.td
+++ b/llvm/lib/Target/M68k/M68kInstrAtomics.td
@@ -10,9 +10,16 @@ foreach size = [8, 16, 32] in {
def : Pat<(!cast<SDPatternOperator>("atomic_load_"#size) MxCP_ARI:$ptr),
(!cast<MxInst>("MOV"#size#"dj") !cast<MxMemOp>("MxARI"#size):$ptr)>;
+ def : Pat<(!cast<SDPatternOperator>("atomic_load_"#size) MxCP_ARII:$ptr),
+ (!cast<MxInst>("MOV"#size#"df") !cast<MxMemOp>("MxARII"#size):$ptr)>;
+
def : Pat<(!cast<SDPatternOperator>("atomic_store_"#size) !cast<MxRegOp>("MxDRD"#size):$val, MxCP_ARI:$ptr),
(!cast<MxInst>("MOV"#size#"jd") !cast<MxMemOp>("MxARI"#size):$ptr,
!cast<MxRegOp>("MxDRD"#size):$val)>;
+
+ def : Pat<(!cast<SDPatternOperator>("atomic_store_"#size) !cast<MxRegOp>("MxDRD"#size):$val, MxCP_ARII:$ptr),
+ (!cast<MxInst>("MOV"#size#"fd") !cast<MxMemOp>("MxARII"#size):$ptr,
+ !cast<MxRegOp>("MxDRD"#size):$val)>;
}
let Predicates = [AtLeastM68020] in {
diff --git a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index d0dc6dd..045dedf 100644
--- a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -30,7 +30,7 @@ MSP430FrameLowering::MSP430FrameLowering(const MSP430Subtarget &STI)
Align(2)),
STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {}
-bool MSP430FrameLowering::hasFP(const MachineFunction &MF) const {
+bool MSP430FrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
diff --git a/llvm/lib/Target/MSP430/MSP430FrameLowering.h b/llvm/lib/Target/MSP430/MSP430FrameLowering.h
index 5227d3e..daa4eec 100644
--- a/llvm/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/llvm/lib/Target/MSP430/MSP430FrameLowering.h
@@ -24,6 +24,7 @@ class MSP430RegisterInfo;
class MSP430FrameLowering : public TargetFrameLowering {
protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
public:
MSP430FrameLowering(const MSP430Subtarget &STI);
@@ -51,7 +52,6 @@ public:
MutableArrayRef<CalleeSavedInfo> CSI,
const TargetRegisterInfo *TRI) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS = nullptr) const override;
diff --git a/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index 99d225f..9b3edcd 100644
--- a/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -86,11 +86,11 @@ const MipsFrameLowering *MipsFrameLowering::create(const MipsSubtarget &ST) {
return llvm::createMipsSEFrameLowering(ST);
}
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas,
-// if it needs dynamic stack realignment, if frame pointer elimination is
-// disabled, or if the frame address is taken.
-bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
+// hasFPImpl - Return true if the specified function should have a dedicated
+// frame pointer register. This is true if the function has variable sized
+// allocas, if it needs dynamic stack realignment, if frame pointer elimination
+// is disabled, or if the frame address is taken.
+bool MipsFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
diff --git a/llvm/lib/Target/Mips/MipsFrameLowering.h b/llvm/lib/Target/Mips/MipsFrameLowering.h
index 710a3d4..25adc33 100644
--- a/llvm/lib/Target/Mips/MipsFrameLowering.h
+++ b/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -23,6 +23,8 @@ class MipsFrameLowering : public TargetFrameLowering {
protected:
const MipsSubtarget &STI;
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
public:
explicit MipsFrameLowering(const MipsSubtarget &sti, Align Alignment)
: TargetFrameLowering(StackGrowsDown, Alignment, 0, Alignment), STI(sti) {
@@ -30,8 +32,6 @@ public:
static const MipsFrameLowering *create(const MipsSubtarget &ST);
- bool hasFP(const MachineFunction &MF) const override;
-
bool hasBP(const MachineFunction &MF) const;
bool allocateScavengingFrameIndexesNearIncomingSP(
diff --git a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 9abe0e3..a5f6cab 100644
--- a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -27,7 +27,9 @@ using namespace llvm;
NVPTXFrameLowering::NVPTXFrameLowering()
: TargetFrameLowering(TargetFrameLowering::StackGrowsUp, Align(8), 0) {}
-bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const { return true; }
+bool NVPTXFrameLowering::hasFPImpl(const MachineFunction &MF) const {
+ return true;
+}
void NVPTXFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
diff --git a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
index a5d49ac..f8d1f97 100644
--- a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -22,7 +22,6 @@ class NVPTXFrameLowering : public TargetFrameLowering {
public:
explicit NVPTXFrameLowering();
- bool hasFP(const MachineFunction &MF) const override;
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
@@ -32,6 +31,9 @@ public:
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 7f942de..93c2d92 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1028,6 +1028,7 @@ static unsigned int getFenceOp(NVPTX::Ordering O, NVPTX::Scope S,
formatv("Unsupported scope \"{}\" for acquire/release/acq_rel fence.",
ScopeToString(S)));
}
+ break;
}
case NVPTX::Ordering::SequentiallyConsistent: {
switch (S) {
@@ -1046,6 +1047,7 @@ static unsigned int getFenceOp(NVPTX::Ordering O, NVPTX::Scope S,
report_fatal_error(formatv("Unsupported scope \"{}\" for seq_cst fence.",
ScopeToString(S)));
}
+ break;
}
case NVPTX::Ordering::NotAtomic:
case NVPTX::Ordering::Relaxed:
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index f7188b8..1083feb 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -355,9 +355,9 @@ PPCFrameLowering::determineFrameLayout(const MachineFunction &MF,
return FrameSize;
}
-// hasFP - Return true if the specified function actually has a dedicated frame
-// pointer register.
-bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
+// hasFPImpl - Return true if the specified function actually has a dedicated
+// frame pointer register.
+bool PPCFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
// FIXME: This is pretty much broken by design: hasFP() might be called really
// early, before the stack layout was calculated and thus hasFP() might return
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.h b/llvm/lib/Target/PowerPC/PPCFrameLowering.h
index d74c874..47f2498 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.h
@@ -107,7 +107,6 @@ public:
void inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool needsFP(const MachineFunction &MF) const;
void replaceFPWithRealFP(MachineFunction &MF) const;
@@ -176,6 +175,9 @@ public:
void updateCalleeSaves(const MachineFunction &MF, BitVector &SavedRegs) const;
uint64_t getStackThreshold() const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 7b07f6b..5d6c7c7 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -17884,10 +17884,10 @@ SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
}
// Override to enable LOAD_STACK_GUARD lowering on Linux.
-bool PPCTargetLowering::useLoadStackGuardNode() const {
- if (!Subtarget.isTargetLinux())
- return TargetLowering::useLoadStackGuardNode();
- return true;
+bool PPCTargetLowering::useLoadStackGuardNode(const Module &M) const {
+ if (M.getStackProtectorGuard() == "tls" || Subtarget.isTargetLinux())
+ return true;
+ return TargetLowering::useLoadStackGuardNode(M);
}
// Override to disable global variable loading on Linux and insert AIX canary
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 8907c3c..8c7961e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1137,7 +1137,7 @@ namespace llvm {
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
/// Override to support customized stack guard loading.
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDagStackGuard(const Module &M) const override;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 48833e8..bc2a1b2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -35,6 +35,7 @@
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/TargetRegistry.h"
@@ -3107,9 +3108,16 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
return true;
}
case TargetOpcode::LOAD_STACK_GUARD: {
- assert(Subtarget.isTargetLinux() &&
- "Only Linux target is expected to contain LOAD_STACK_GUARD");
- const int64_t Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
+ auto M = MBB.getParent()->getFunction().getParent();
+ assert(
+ (Subtarget.isTargetLinux() || M->getStackProtectorGuard() == "tls") &&
+ "Only Linux target or tls mode are expected to contain "
+ "LOAD_STACK_GUARD");
+ int64_t Offset;
+ if (M->getStackProtectorGuard() == "tls")
+ Offset = M->getStackProtectorGuardOffset();
+ else
+ Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 5ad09ae..5eba36a 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -19,6 +19,7 @@
#include "RISCV.h"
#include "RISCVConstantPoolValue.h"
#include "RISCVMachineFunctionInfo.h"
+#include "RISCVRegisterInfo.h"
#include "RISCVTargetMachine.h"
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/ADT/APInt.h"
@@ -348,6 +349,13 @@ bool RISCVAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (!MO.isReg())
OS << 'i';
return false;
+ case 'N': // Print the register encoding as an integer (0-31)
+ if (!MO.isReg())
+ return true;
+
+ const RISCVRegisterInfo *TRI = STI->getRegisterInfo();
+ OS << TRI->getEncodingValue(MO.getReg());
+ return false;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f388376..b49cbab1 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -309,7 +309,7 @@ static Register getMaxPushPopReg(const MachineFunction &MF,
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
// variable sized allocas, or if the frame address is taken.
-bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const {
+bool RISCVFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index d660f3a..f45fcdb 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -37,8 +37,6 @@ public:
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
- bool hasFP(const MachineFunction &MF) const override;
-
bool hasBP(const MachineFunction &MF) const;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
@@ -83,6 +81,8 @@ public:
protected:
const RISCVSubtarget &STI;
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
void determineFrameLayout(MachineFunction &MF) const;
void adjustStackForRVV(MachineFunction &MF, MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 952072c..60ac58f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -37,6 +37,8 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -625,6 +627,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64,
Subtarget.is64Bit() ? Legal : Custom);
+ if (Subtarget.is64Bit()) {
+ setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
+ setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
+ }
+
setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
if (Subtarget.is64Bit())
@@ -7402,6 +7409,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return emitFlushICache(DAG, Op.getOperand(0), Op.getOperand(1),
Op.getOperand(2), Flags, DL);
}
+ case ISD::INIT_TRAMPOLINE:
+ return lowerINIT_TRAMPOLINE(Op, DAG);
+ case ISD::ADJUST_TRAMPOLINE:
+ return lowerADJUST_TRAMPOLINE(Op, DAG);
}
}
@@ -7417,6 +7428,126 @@ SDValue RISCVTargetLowering::emitFlushICache(SelectionDAG &DAG, SDValue InChain,
return CallResult.second;
}
+SDValue RISCVTargetLowering::lowerINIT_TRAMPOLINE(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (!Subtarget.is64Bit())
+ llvm::report_fatal_error("Trampolines only implemented for RV64");
+
+ // Create an MCCodeEmitter to encode instructions.
+ TargetLoweringObjectFile *TLO = getTargetMachine().getObjFileLowering();
+ assert(TLO);
+ MCContext &MCCtx = TLO->getContext();
+
+ std::unique_ptr<MCCodeEmitter> CodeEmitter(
+ createRISCVMCCodeEmitter(*getTargetMachine().getMCInstrInfo(), MCCtx));
+
+ SDValue Root = Op.getOperand(0);
+ SDValue Trmp = Op.getOperand(1); // trampoline
+ SDLoc dl(Op);
+
+ const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+
+ // We store in the trampoline buffer the following instructions and data.
+ // Offset:
+ // 0: auipc t2, 0
+ // 4: ld t0, 24(t2)
+ // 8: ld t2, 16(t2)
+ // 12: jalr t0
+ // 16: <StaticChainOffset>
+ // 24: <FunctionAddressOffset>
+ // 32:
+
+ constexpr unsigned StaticChainOffset = 16;
+ constexpr unsigned FunctionAddressOffset = 24;
+
+ const MCSubtargetInfo *STI = getTargetMachine().getMCSubtargetInfo();
+ assert(STI);
+ auto GetEncoding = [&](const MCInst &MC) {
+ SmallVector<char, 4> CB;
+ SmallVector<MCFixup> Fixups;
+ CodeEmitter->encodeInstruction(MC, CB, Fixups, *STI);
+ uint32_t Encoding = support::endian::read32le(CB.data());
+ return Encoding;
+ };
+
+ SDValue OutChains[6];
+
+ uint32_t Encodings[] = {
+ // auipc t2, 0
+ // Loads the current PC into t2.
+ GetEncoding(MCInstBuilder(RISCV::AUIPC).addReg(RISCV::X7).addImm(0)),
+ // ld t0, 24(t2)
+ // Loads the function address into t0. Note that we are using offsets
+ // pc-relative to the first instruction of the trampoline.
+ GetEncoding(
+ MCInstBuilder(RISCV::LD).addReg(RISCV::X5).addReg(RISCV::X7).addImm(
+ FunctionAddressOffset)),
+ // ld t2, 16(t2)
+ // Load the value of the static chain.
+ GetEncoding(
+ MCInstBuilder(RISCV::LD).addReg(RISCV::X7).addReg(RISCV::X7).addImm(
+ StaticChainOffset)),
+ // jalr t0
+ // Jump to the function.
+ GetEncoding(MCInstBuilder(RISCV::JALR)
+ .addReg(RISCV::X0)
+ .addReg(RISCV::X5)
+ .addImm(0))};
+
+ // Store encoded instructions.
+ for (auto [Idx, Encoding] : llvm::enumerate(Encodings)) {
+ SDValue Addr = Idx > 0 ? DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(Idx * 4, dl, MVT::i64))
+ : Trmp;
+ OutChains[Idx] = DAG.getTruncStore(
+ Root, dl, DAG.getConstant(Encoding, dl, MVT::i64), Addr,
+ MachinePointerInfo(TrmpAddr, Idx * 4), MVT::i32);
+ }
+
+ // Now store the variable part of the trampoline.
+ SDValue FunctionAddress = Op.getOperand(2);
+ SDValue StaticChain = Op.getOperand(3);
+
+ // Store the given static chain and function pointer in the trampoline buffer.
+ struct OffsetValuePair {
+ const unsigned Offset;
+ const SDValue Value;
+ SDValue Addr = SDValue(); // Used to cache the address.
+ } OffsetValues[] = {
+ {StaticChainOffset, StaticChain},
+ {FunctionAddressOffset, FunctionAddress},
+ };
+ for (auto [Idx, OffsetValue] : llvm::enumerate(OffsetValues)) {
+ SDValue Addr =
+ DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(OffsetValue.Offset, dl, MVT::i64));
+ OffsetValue.Addr = Addr;
+ OutChains[Idx + 4] =
+ DAG.getStore(Root, dl, OffsetValue.Value, Addr,
+ MachinePointerInfo(TrmpAddr, OffsetValue.Offset));
+ }
+
+ SDValue StoreToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+
+ // The end of the trampoline's instruction sequence is the same as the
+ // static chain address that we computed earlier.
+ SDValue EndOfTrmp = OffsetValues[0].Addr;
+
+ // Call clear cache on the trampoline instructions.
+ SDValue Chain = DAG.getNode(ISD::CLEAR_CACHE, dl, MVT::Other, StoreToken,
+ Trmp, EndOfTrmp);
+
+ return Chain;
+}
+
+SDValue RISCVTargetLowering::lowerADJUST_TRAMPOLINE(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (!Subtarget.is64Bit())
+ llvm::report_fatal_error("Trampolines only implemented for RV64");
+
+ return Op.getOperand(0);
+}
+
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG, unsigned Flags) {
return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
@@ -20235,6 +20366,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
} else {
if (Constraint == "vr" || Constraint == "vd" || Constraint == "vm")
return C_RegisterClass;
+ if (Constraint == "cr" || Constraint == "cf")
+ return C_RegisterClass;
}
return TargetLowering::getConstraintType(Constraint);
}
@@ -20297,6 +20430,22 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
} else if (Constraint == "vm") {
if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
return std::make_pair(0U, &RISCV::VMV0RegClass);
+ } else if (Constraint == "cr") {
+ if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
+ return std::make_pair(0U, &RISCV::GPRF16CRegClass);
+ if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
+ return std::make_pair(0U, &RISCV::GPRF32CRegClass);
+ if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
+ return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+ if (!VT.isVector())
+ return std::make_pair(0U, &RISCV::GPRCRegClass);
+ } else if (Constraint == "cf") {
+ if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
+ return std::make_pair(0U, &RISCV::FPR16CRegClass);
+ if (Subtarget.hasStdExtF() && VT == MVT::f32)
+ return std::make_pair(0U, &RISCV::FPR32CRegClass);
+ if (Subtarget.hasStdExtD() && VT == MVT::f64)
+ return std::make_pair(0U, &RISCV::FPR64CRegClass);
}
// Clang will correctly decode the usage of register name aliases into their
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 3864d58..c374944 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -992,6 +992,9 @@ private:
SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
+
bool isEligibleForTailCallOptimization(
CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
const SmallVector<CCValAssign, 16> &ArgLocs) const;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 68182d2..6b308bc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -50,7 +50,7 @@
/// each of the preceding fields which are relevant for a given instruction
/// in the opcode space.
///
-/// Currently, the policy is represented via the following instrinsic families:
+/// Currently, the policy is represented via the following intrinsic families:
/// * _MASK - Can represent all three policy states for both tail and mask. If
/// passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
/// Otherwise, policy operand and tablegen flags drive the interpretation.
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 33363aa8..250f3c1 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -338,6 +338,11 @@ def FPR16 : RISCVRegisterClass<[f16, bf16], 16, (add
(sequence "F%u_H", 18, 27) // fs2-fs11
)>;
+def FPR16C : RISCVRegisterClass<[f16, bf16], 16, (add
+ (sequence "F%u_H", 15, 10),
+ (sequence "F%u_H", 8, 9)
+)>;
+
def FPR32 : RISCVRegisterClass<[f32], 32, (add
(sequence "F%u_F", 15, 10),
(sequence "F%u_F", 0, 7),
@@ -667,6 +672,10 @@ def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
(sequence "X%u_W", 8, 9))>;
def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
+def XLenPairRI : RegInfoByHwMode<
+ [RV32, RV64],
+ [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
+
// Dummy zero register for use in the register pair containing X0 (as X1 is
// not read to or written when the X0 register pair is used).
def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
@@ -698,9 +707,8 @@ let RegAltNameIndices = [ABIRegAltName] in {
}
}
-let RegInfos = RegInfoByHwMode<[RV32, RV64],
- [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>,
- DecoderMethod = "DecodeGPRPairRegisterClass" in
+let RegInfos = XLenPairRI,
+ DecoderMethod = "DecodeGPRPairRegisterClass" in {
def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
X10_X11, X12_X13, X14_X15, X16_X17,
X6_X7,
@@ -710,6 +718,11 @@ def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
X0_Pair, X2_X3, X4_X5
)>;
+def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
+ X10_X11, X12_X13, X14_X15, X8_X9
+)>;
+} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
+
// The register class is added for inline assembly for vector mask types.
def VM : VReg<VMaskVTs, (add VR), 1>;
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index df5c6b5..395baa5 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -343,6 +343,28 @@ RISCVTTIImpl::getConstantPoolLoadCost(Type *Ty, TTI::TargetCostKind CostKind) {
/*AddressSpace=*/0, CostKind);
}
+static bool isRepeatedConcatMask(ArrayRef<int> Mask, int &SubVectorSize) {
+ unsigned Size = Mask.size();
+ if (!isPowerOf2_32(Size))
+ return false;
+ for (unsigned I = 0; I != Size; ++I) {
+ if (static_cast<unsigned>(Mask[I]) == I)
+ continue;
+ if (Mask[I] != 0)
+ return false;
+ if (Size % I != 0)
+ return false;
+ for (unsigned J = I + 1; J != Size; ++J)
+ // Check the pattern is repeated.
+ if (static_cast<unsigned>(Mask[J]) != J % I)
+ return false;
+ SubVectorSize = I;
+ return true;
+ }
+ // That means Mask is <0, 1, 2, 3>. This is not a concatenation.
+ return false;
+}
+
static VectorType *getVRGatherIndexType(MVT DataVT, const RISCVSubtarget &ST,
LLVMContext &C) {
assert((DataVT.getScalarSizeInBits() != 8 ||
@@ -394,6 +416,29 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
LT.second, CostKind);
}
}
+ int SubVectorSize;
+ if (LT.second.getScalarSizeInBits() != 1 &&
+ isRepeatedConcatMask(Mask, SubVectorSize)) {
+ InstructionCost Cost = 0;
+ unsigned NumSlides = Log2_32(Mask.size() / SubVectorSize);
+ // The cost of extraction from a subvector is 0 if the index is 0.
+ for (unsigned I = 0; I != NumSlides; ++I) {
+ unsigned InsertIndex = SubVectorSize * (1 << I);
+ FixedVectorType *SubTp =
+ FixedVectorType::get(Tp->getElementType(), InsertIndex);
+ FixedVectorType *DestTp =
+ FixedVectorType::getDoubleElementsVectorType(SubTp);
+ std::pair<InstructionCost, MVT> DestLT =
+ getTypeLegalizationCost(DestTp);
+ // Add the cost of whole vector register move because the
+ // destination vector register group for vslideup cannot overlap the
+ // source.
+ Cost += DestLT.first * TLI->getLMULCost(DestLT.second);
+ Cost += getShuffleCost(TTI::SK_InsertSubvector, DestTp, {},
+ CostKind, InsertIndex, SubTp);
+ }
+ return Cost;
+ }
}
// vrgather + cost of generating the mask constant.
// We model this for an unknown mask with a single vrgather.
diff --git a/llvm/lib/Target/SPIRV/SPIRVFrameLowering.h b/llvm/lib/Target/SPIRV/SPIRVFrameLowering.h
index b98f8d0..c752255 100644
--- a/llvm/lib/Target/SPIRV/SPIRVFrameLowering.h
+++ b/llvm/lib/Target/SPIRV/SPIRVFrameLowering.h
@@ -33,7 +33,8 @@ public:
void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const override {}
- bool hasFP(const MachineFunction &MF) const override { return false; }
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override { return false; }
};
} // namespace llvm
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVFRAMELOWERING_H
diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index 000418b..fa38c6c 100644
--- a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -249,10 +249,10 @@ bool SparcFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
return !MF.getFrameInfo().hasVarSizedObjects();
}
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool SparcFrameLowering::hasFP(const MachineFunction &MF) const {
+// hasFPImpl - Return true if the specified function should have a dedicated
+// frame pointer register. This is true if the function has variable sized
+// allocas or if frame pointer elimination is disabled.
+bool SparcFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.h b/llvm/lib/Target/Sparc/SparcFrameLowering.h
index ab0ceb6..8038568 100644
--- a/llvm/lib/Target/Sparc/SparcFrameLowering.h
+++ b/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -35,7 +35,6 @@ public:
MachineBasicBlock::iterator I) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
- bool hasFP(const MachineFunction &MF) const override;
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS = nullptr) const override;
@@ -47,6 +46,9 @@ public:
/// time).
bool targetHandlesStackFrameRounding() const override { return true; }
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
// Remap input registers to output registers for leaf procedure.
void remapRegsForLeafProc(MachineFunction &MF) const;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 42b8248..de4986e 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -3548,9 +3548,9 @@ void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
}
// Override to enable LOAD_STACK_GUARD lowering on Linux.
-bool SparcTargetLowering::useLoadStackGuardNode() const {
+bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
if (!Subtarget->isTargetLinux())
- return TargetLowering::useLoadStackGuardNode();
+ return TargetLowering::useLoadStackGuardNode(M);
return true;
}
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 15d09bc..cc67207 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -119,7 +119,7 @@ namespace llvm {
}
/// Override to support customized stack guard loading.
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
void insertSSPDeclarations(Module &M) const override;
/// getSetCCResultType - Return the ISD::SETCC ValueType
diff --git a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 5b26ba0..f0a8564 100644
--- a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -442,7 +442,7 @@ private:
bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
- // Both the hlasm and att variants still rely on the basic gnu asm
+ // Both the hlasm and gnu variants still rely on the basic gnu asm
// format with respect to inputs, clobbers, outputs etc.
//
// However, calling the overriden getAssemblerDialect() method in
@@ -475,8 +475,8 @@ private:
// Are we parsing using the AD_HLASM dialect?
inline bool isParsingHLASM() { return getMAIAssemblerDialect() == AD_HLASM; }
- // Are we parsing using the AD_ATT dialect?
- inline bool isParsingATT() { return getMAIAssemblerDialect() == AD_ATT; }
+ // Are we parsing using the AD_GNU dialect?
+ inline bool isParsingGNU() { return getMAIAssemblerDialect() == AD_GNU; }
public:
SystemZAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser,
@@ -848,7 +848,7 @@ ParseStatus SystemZAsmParser::parseRegister(OperandVector &Operands,
}
// Handle register names of the form %<prefix><number>
- if (isParsingATT() && Parser.getTok().is(AsmToken::Percent)) {
+ if (isParsingGNU() && Parser.getTok().is(AsmToken::Percent)) {
if (parseRegister(Reg, /*RequirePercent=*/true))
return ParseStatus::Failure;
@@ -1029,7 +1029,7 @@ bool SystemZAsmParser::parseAddress(bool &HaveReg1, Register &Reg1,
if (getLexer().is(AsmToken::LParen)) {
Parser.Lex();
- if (isParsingATT() && getLexer().is(AsmToken::Percent)) {
+ if (isParsingGNU() && getLexer().is(AsmToken::Percent)) {
// Parse the first register.
HaveReg1 = true;
if (parseRegister(Reg1, /*RequirePercent=*/true))
@@ -1072,7 +1072,7 @@ bool SystemZAsmParser::parseAddress(bool &HaveReg1, Register &Reg1,
if (parseIntegerRegister(Reg2, RegGR))
return true;
} else {
- if (isParsingATT() && parseRegister(Reg2, /*RequirePercent=*/true))
+ if (isParsingGNU() && parseRegister(Reg2, /*RequirePercent=*/true))
return true;
}
}
@@ -1490,7 +1490,7 @@ bool SystemZAsmParser::parseOperand(OperandVector &Operands,
// a context-dependent parse routine, which gives the required register
// class. The code is here to mop up other cases, like those where
// the instruction isn't recognized.
- if (isParsingATT() && Parser.getTok().is(AsmToken::Percent)) {
+ if (isParsingGNU() && Parser.getTok().is(AsmToken::Percent)) {
Register Reg;
if (parseRegister(Reg, /*RequirePercent=*/true))
return true;
@@ -1672,7 +1672,7 @@ ParseStatus SystemZAsmParser::parsePCRel(OperandVector &Operands,
}
bool SystemZAsmParser::isLabel(AsmToken &Token) {
- if (isParsingATT())
+ if (isParsingGNU())
return true;
// HLASM labels are ordinary symbols.
diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index 66555fa..3a1d01c 100644
--- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -13,7 +13,7 @@
using namespace llvm;
SystemZMCAsmInfoELF::SystemZMCAsmInfoELF(const Triple &TT) {
- AssemblerDialect = AD_ATT;
+ AssemblerDialect = AD_GNU;
CalleeSaveStackSlotSize = 8;
CodePointerSize = 8;
Data64bitsDirective = "\t.quad\t";
diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
index b2f1914..58b9a3d 100644
--- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -15,7 +15,7 @@
namespace llvm {
class Triple;
-enum SystemZAsmDialect { AD_ATT = 0, AD_HLASM = 1 };
+enum SystemZAsmDialect { AD_GNU = 0, AD_HLASM = 1 };
class SystemZMCAsmInfoELF : public MCAsmInfoELF {
public:
diff --git a/llvm/lib/Target/SystemZ/SystemZ.td b/llvm/lib/Target/SystemZ/SystemZ.td
index e18deed..9d0c77e 100644
--- a/llvm/lib/Target/SystemZ/SystemZ.td
+++ b/llvm/lib/Target/SystemZ/SystemZ.td
@@ -67,11 +67,11 @@ def SystemZAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
}
-def ATTAsmParserVariant : AsmParserVariant {
+def GNUAsmParserVariant : AsmParserVariant {
int Variant = 0;
// Variant name.
- string Name = "att";
+ string Name = "gnu";
}
def HLASMAsmParserVariant : AsmParserVariant {
@@ -88,6 +88,6 @@ def HLASMAsmParserVariant : AsmParserVariant {
def SystemZ : Target {
let InstructionSet = SystemZInstrInfo;
let AssemblyParsers = [SystemZAsmParser];
- let AssemblyParserVariants = [ATTAsmParserVariant, HLASMAsmParserVariant];
+ let AssemblyParserVariants = [GNUAsmParserVariant, HLASMAsmParserVariant];
let AllowRegisterRenaming = 1;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index 8c53b8d..8fbd05e 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -832,7 +832,7 @@ void SystemZELFFrameLowering::inlineStackProbe(
}
}
-bool SystemZELFFrameLowering::hasFP(const MachineFunction &MF) const {
+bool SystemZELFFrameLowering::hasFPImpl(const MachineFunction &MF) const {
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
MF.getFrameInfo().hasVarSizedObjects());
}
@@ -1449,7 +1449,12 @@ void SystemZXPLINKFrameLowering::inlineStackProbe(
fullyRecomputeLiveIns({StackExtMBB, NextMBB});
}
-bool SystemZXPLINKFrameLowering::hasFP(const MachineFunction &MF) const {
+bool SystemZXPLINKFrameLowering::hasFPImpl(const MachineFunction &MF) const {
+ // Naked functions have no stack frame pushed, so we don't have a frame
+ // pointer.
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
+ return false;
+
return (MF.getFrameInfo().hasVarSizedObjects());
}
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index c4367b4..57fc73b 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -86,7 +86,6 @@ public:
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
Register &FrameReg) const override;
void
@@ -113,6 +112,9 @@ public:
// Get or create the frame index of where the old frame pointer is stored.
int getOrCreateFramePointerSaveIndex(MachineFunction &MF) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
class SystemZXPLINKFrameLowering : public SystemZFrameLowering {
@@ -147,8 +149,6 @@ public:
void inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
-
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
@@ -167,6 +167,9 @@ public:
// Get or create the frame index of where the old frame pointer is stored.
int getOrCreateFramePointerSaveIndex(MachineFunction &MF) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 2b06524..3c06c1f 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -570,9 +570,7 @@ public:
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
/// Override to support customized stack guard loading.
- bool useLoadStackGuardNode() const override {
- return true;
- }
+ bool useLoadStackGuardNode(const Module &M) const override { return true; }
void insertSSPDeclarations(Module &M) const override {
}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
index 9a12718..50f636a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -2050,7 +2050,7 @@ class CondVariant<bits<4> ccmaskin, string suffixin, bit alternatein,
bit alternate = alternatein;
// Whether this needs be to restricted to a specific dialect.
- // Valid values are "att" and "hlasm", which when passed in
+ // Valid values are "gnu" and "hlasm", which when passed in
// will set AsmVariantName.
string asmvariant = asmvariantin;
}
@@ -2063,20 +2063,20 @@ def CondAlways : CondVariant<15, "", 0>;
def CondVariantO : CondVariant<1, "o", 0>;
def CondVariantH : CondVariant<2, "h", 0>;
def CondVariantP : CondVariant<2, "p", 1>;
-def CondVariantNLE : CondVariant<3, "nle", 0, "att">;
+def CondVariantNLE : CondVariant<3, "nle", 0, "gnu">;
def CondVariantL : CondVariant<4, "l", 0>;
def CondVariantM : CondVariant<4, "m", 1>;
-def CondVariantNHE : CondVariant<5, "nhe", 0, "att">;
-def CondVariantLH : CondVariant<6, "lh", 0, "att">;
+def CondVariantNHE : CondVariant<5, "nhe", 0, "gnu">;
+def CondVariantLH : CondVariant<6, "lh", 0, "gnu">;
def CondVariantNE : CondVariant<7, "ne", 0>;
def CondVariantNZ : CondVariant<7, "nz", 1>;
def CondVariantE : CondVariant<8, "e", 0>;
def CondVariantZ : CondVariant<8, "z", 1>;
-def CondVariantNLH : CondVariant<9, "nlh", 0, "att">;
-def CondVariantHE : CondVariant<10, "he", 0, "att">;
+def CondVariantNLH : CondVariant<9, "nlh", 0, "gnu">;
+def CondVariantHE : CondVariant<10, "he", 0, "gnu">;
def CondVariantNL : CondVariant<11, "nl", 0>;
def CondVariantNM : CondVariant<11, "nm", 1>;
-def CondVariantLE : CondVariant<12, "le", 0, "att">;
+def CondVariantLE : CondVariant<12, "le", 0, "gnu">;
def CondVariantNH : CondVariant<13, "nh", 0>;
def CondVariantNP : CondVariant<13, "np", 1>;
def CondVariantNO : CondVariant<14, "no", 0>;
@@ -2093,16 +2093,16 @@ class CV<string name>
// and that the low bit of the mask is therefore always 0. This means
// that each condition has two names. Conditions "o" and "no" are not used.
def IntCondVariantH : CondVariant<2, "h", 0>;
-def IntCondVariantNLE : CondVariant<2, "nle", 1, "att">;
+def IntCondVariantNLE : CondVariant<2, "nle", 1, "gnu">;
def IntCondVariantL : CondVariant<4, "l", 0>;
-def IntCondVariantNHE : CondVariant<4, "nhe", 1, "att">;
-def IntCondVariantLH : CondVariant<6, "lh", 0, "att">;
+def IntCondVariantNHE : CondVariant<4, "nhe", 1, "gnu">;
+def IntCondVariantLH : CondVariant<6, "lh", 0, "gnu">;
def IntCondVariantNE : CondVariant<6, "ne", 1>;
def IntCondVariantE : CondVariant<8, "e", 0>;
-def IntCondVariantNLH : CondVariant<8, "nlh", 1, "att">;
-def IntCondVariantHE : CondVariant<10, "he", 0, "att">;
+def IntCondVariantNLH : CondVariant<8, "nlh", 1, "gnu">;
+def IntCondVariantHE : CondVariant<10, "he", 0, "gnu">;
def IntCondVariantNL : CondVariant<10, "nl", 1>;
-def IntCondVariantLE : CondVariant<12, "le", 0, "att">;
+def IntCondVariantLE : CondVariant<12, "le", 0, "gnu">;
def IntCondVariantNH : CondVariant<12, "nh", 1>;
// A helper class to look up one of the above by name.
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 95ed1a0..f3baf89 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -121,7 +121,7 @@ def NOPR_bare : InstAlias<"nopr", (NOPR R0D), 0>;
def JNOP : InstAlias<"jnop\t$RI2", (BRCAsm 0, brtarget16:$RI2), 0>;
// An alias of BRCL 0, label
-// jgnop on att ; jlnop on hlasm
+// jgnop on gnu ; jlnop on hlasm
def JGNOP : InstAlias<"{jgnop|jlnop}\t$RI2", (BRCLAsm 0, brtarget32:$RI2), 0>;
// Fused compare-and-branch instructions.
@@ -2351,12 +2351,12 @@ def JXHG : MnemonicAlias<"jxhg", "brxhg">;
def JXLEG : MnemonicAlias<"jxleg", "brxlg">;
def BRU : MnemonicAlias<"bru", "j">;
-def BRUL : MnemonicAlias<"brul", "jg", "att">;
+def BRUL : MnemonicAlias<"brul", "jg", "gnu">;
def BRUL_HLASM : MnemonicAlias<"brul", "jlu", "hlasm">;
foreach V = [ "E", "NE", "H", "NH", "L", "NL", "HE", "NHE", "LE", "NLE",
"Z", "NZ", "P", "NP", "M", "NM", "LH", "NLH", "O", "NO" ] in {
defm BRUAsm#V : MnemonicCondBranchAlias <CV<V>, "br#", "j#">;
- defm BRULAsm#V : MnemonicCondBranchAlias <CV<V>, "br#l", "jg#", "att">;
+ defm BRULAsm#V : MnemonicCondBranchAlias <CV<V>, "br#l", "jg#", "gnu">;
defm BRUL_HLASMAsm#V : MnemonicCondBranchAlias <CV<V>, "br#l", "jl#", "hlasm">;
}
diff --git a/llvm/lib/Target/VE/VEFrameLowering.cpp b/llvm/lib/Target/VE/VEFrameLowering.cpp
index 195bd4e..10e94c2 100644
--- a/llvm/lib/Target/VE/VEFrameLowering.cpp
+++ b/llvm/lib/Target/VE/VEFrameLowering.cpp
@@ -415,10 +415,10 @@ void VEFrameLowering::emitEpilogue(MachineFunction &MF,
emitEpilogueInsns(MF, MBB, MBBI, NumBytes, true);
}
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas
-// or if frame pointer elimination is disabled.
-bool VEFrameLowering::hasFP(const MachineFunction &MF) const {
+// hasFPImpl - Return true if the specified function should have a dedicated
+// frame pointer register. This is true if the function has variable sized
+// allocas or if frame pointer elimination is disabled.
+bool VEFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/VE/VEFrameLowering.h b/llvm/lib/Target/VE/VEFrameLowering.h
index 36fc8b2..be9cdc0 100644
--- a/llvm/lib/Target/VE/VEFrameLowering.h
+++ b/llvm/lib/Target/VE/VEFrameLowering.h
@@ -39,7 +39,6 @@ public:
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasBP(const MachineFunction &MF) const;
bool hasGOT(const MachineFunction &MF) const;
@@ -69,6 +68,8 @@ public:
protected:
const VESubtarget &STI;
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
// Returns true if MF is a leaf procedure.
bool isLeafProc(MachineFunction &MF) const;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
index 8f3ad16..f0334cc 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
@@ -98,7 +98,7 @@ bool WebAssemblyFrameLowering::hasBP(const MachineFunction &MF) const {
/// Return true if the specified function should have a dedicated frame pointer
/// register.
-bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const {
+bool WebAssemblyFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
// When we have var-sized objects, we move the stack pointer by an unknown
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
index 528b33e..710d517 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
@@ -41,7 +41,6 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
bool isSupportedStackID(TargetStackID::Value ID) const override;
DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override;
@@ -68,6 +67,9 @@ public:
static unsigned getOpcGlobGet(const MachineFunction &MF);
static unsigned getOpcGlobSet(const MachineFunction &MF);
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
bool hasBP(const MachineFunction &MF) const;
bool needsSPForLocalFrame(const MachineFunction &MF) const;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index 60b3294..2c05438 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -708,7 +708,7 @@ defm "" : ReplaceLane<I64x2, 30>;
defm "" : ReplaceLane<F32x4, 32>;
defm "" : ReplaceLane<F64x2, 34>;
-// For now use an instrinsic for f16x8.replace_lane instead of ReplaceLane above
+// For now use an intrinsic for f16x8.replace_lane instead of ReplaceLane above
// since LLVM IR generated with half type arguments is not well supported and
// creates conversions from f16->f32.
defm REPLACE_LANE_F16x8 :
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 4bf660b..9e4e554 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3544,7 +3544,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
if (NeedLoad)
- MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
+ MIB.addReg(Is64Bit ? X86::RIP : X86::NoRegister).addImm(1).addReg(0);
if (Symbol)
MIB.addSym(Symbol, OpFlags);
else
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 4f83267..a35b046 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -91,10 +91,10 @@ bool X86FrameLowering::needsFrameIndexResolution(
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register. This is true if the function has variable sized allocas
-/// or if frame pointer elimination is disabled.
-bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
+/// hasFPImpl - Return true if the specified function should have a dedicated
+/// frame pointer register. This is true if the function has variable sized
+/// allocas or if frame pointer elimination is disabled.
+bool X86FrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h
index 7821791..02fe8ee 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/llvm/lib/Target/X86/X86FrameLowering.h
@@ -105,7 +105,6 @@ public:
void spillFPBP(MachineFunction &MF) const override;
- bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
bool needsFrameIndexResolution(const MachineFunction &MF) const override;
@@ -201,6 +200,9 @@ public:
/// frame of the top of stack function) as part of it's ABI.
bool has128ByteRedZone(const MachineFunction& MF) const;
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
+
private:
bool isWin64Prologue(const MachineFunction &MF) const;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index db633d1..9d14325 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2682,7 +2682,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
// This has so far only been implemented for 64-bit MachO.
-bool X86TargetLowering::useLoadStackGuardNode() const {
+bool X86TargetLowering::useLoadStackGuardNode(const Module &M) const {
return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
@@ -29986,7 +29986,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
MVT::getVectorVT(NarrowScalarVT, WideNumElts), dl, AmtWideElts);
AmtWide = DAG.getZExtOrTrunc(AmtWide, dl, WideVT);
// Perform the actual shift.
- unsigned LogicalOpc = Opc == ISD::SRA ? ISD::SRL : Opc;
+ unsigned LogicalOpc = Opc == ISD::SRA ? (unsigned)ISD::SRL : Opc;
SDValue ShiftedR = DAG.getNode(LogicalOpc, dl, WideVT, RWide, AmtWide);
// Now we need to construct a mask which will "drop" bits that get
// shifted past the LSB/MSB. For a logical shift left, it will look
@@ -56543,14 +56543,9 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
SDValue Op1 = N->getOperand(1);
SDLoc DL(N);
- // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
auto IsNonOpaqueConstant = [&](SDValue Op) {
- if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
- if (auto *Cst = dyn_cast<ConstantSDNode>(C))
- return !Cst->isOpaque();
- return true;
- }
- return false;
+ return DAG.isConstantIntBuildVectorOrConstantInt(Op,
+ /*AllowOpaques*/ false);
};
// X86 can't encode an immediate LHS of a sub. See if we can push the
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 3b1bd0a..14ada17 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1568,7 +1568,7 @@ namespace llvm {
/// returns the address of that location. Otherwise, returns nullptr.
Value *getIRStackGuard(IRBuilderBase &IRB) const override;
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
bool useStackGuardXorFP() const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDagStackGuard(const Module &M) const override;
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index e77e56a..036d7d9 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -7741,12 +7741,14 @@ let Predicates = [HasAVX2, NoVLX] in {
// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
-def : Pat<(v8i32 (X86VBroadcastld32 addr:$src)),
- (VBROADCASTSSYrm addr:$src)>;
-def : Pat<(v4i64 (X86VBroadcastld64 addr:$src)),
- (VBROADCASTSDYrm addr:$src)>;
-def : Pat<(v4i32 (X86VBroadcastld32 addr:$src)),
- (VBROADCASTSSrm addr:$src)>;
+ def : Pat<(v8i32 (X86VBroadcastld32 addr:$src)),
+ (VBROADCASTSSYrm addr:$src)>;
+ def : Pat<(v4i64 (X86VBroadcastld64 addr:$src)),
+ (VBROADCASTSDYrm addr:$src)>;
+ def : Pat<(v4i32 (X86VBroadcastld32 addr:$src)),
+ (VBROADCASTSSrm addr:$src)>;
+ def : Pat<(v2i64 (X86VBroadcastld64 addr:$src)),
+ (VMOVDDUPrm addr:$src)>;
}
// Provide fallback in case the load node that is used in the patterns above
@@ -7795,9 +7797,6 @@ let Predicates = [HasAVX1Only] in {
def : Pat<(v2i64 (X86VBroadcast i64:$src)),
(VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)>;
- def : Pat<(v2i64 (X86VBroadcastld64 addr:$src)),
- (VMOVDDUPrm addr:$src)>;
-
def : Pat<(v4i64 (X86VBroadcast v2i64:$src)),
(VINSERTF128rri (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
(v2i64 (VPSHUFDri VR128:$src, 0x44)), sub_xmm),
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index b375369..ec18eca 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -215,7 +215,7 @@ XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
// Do nothing
}
-bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
+bool XCoreFrameLowering::hasFPImpl(const MachineFunction &MF) const {
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
MF.getFrameInfo().hasVarSizedObjects();
}
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.h b/llvm/lib/Target/XCore/XCoreFrameLowering.h
index a914d82..b06a6f9 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.h
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.h
@@ -46,8 +46,6 @@ namespace llvm {
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
- bool hasFP(const MachineFunction &MF) const override;
-
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS = nullptr) const override;
@@ -58,6 +56,9 @@ namespace llvm {
static int stackSlotSize() {
return 4;
}
+
+ protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
}
diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
index e24cb77..f46d386 100644
--- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
@@ -27,7 +27,7 @@ XtensaFrameLowering::XtensaFrameLowering(const XtensaSubtarget &STI)
Align(4)),
TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {}
-bool XtensaFrameLowering::hasFP(const MachineFunction &MF) const {
+bool XtensaFrameLowering::hasFPImpl(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
MFI.hasVarSizedObjects();
diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.h b/llvm/lib/Target/Xtensa/XtensaFrameLowering.h
index 9120215..3f946e1 100644
--- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.h
+++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.h
@@ -24,8 +24,6 @@ class XtensaFrameLowering : public TargetFrameLowering {
public:
XtensaFrameLowering(const XtensaSubtarget &STI);
- bool hasFP(const MachineFunction &MF) const override;
-
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
void emitPrologue(MachineFunction &, MachineBasicBlock &) const override;
@@ -50,6 +48,9 @@ public:
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override;
};
} // namespace llvm
diff --git a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
index bd0a337..7feebbe 100644
--- a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
@@ -57,9 +57,9 @@ static cl::opt<unsigned> MaxBlockPredecessors(
"considered during the estimation of dead code"));
static cl::opt<unsigned> MinFunctionSize(
- "funcspec-min-function-size", cl::init(300), cl::Hidden, cl::desc(
- "Don't specialize functions that have less than this number of "
- "instructions"));
+ "funcspec-min-function-size", cl::init(500), cl::Hidden,
+ cl::desc("Don't specialize functions that have less than this number of "
+ "instructions"));
static cl::opt<unsigned> MaxCodeSizeGrowth(
"funcspec-max-codesize-growth", cl::init(3), cl::Hidden, cl::desc(
@@ -641,12 +641,17 @@ bool FunctionSpecializer::run() {
Metrics.analyzeBasicBlock(&BB, GetTTI(F), EphValues);
}
+ // When specializing literal constants is enabled, always require functions
+ // to be larger than MinFunctionSize, to prevent excessive specialization.
+ const bool RequireMinSize =
+ !ForceSpecialization &&
+ (SpecializeLiteralConstant || !F.hasFnAttribute(Attribute::NoInline));
+
// If the code metrics reveal that we shouldn't duplicate the function,
// or if the code size implies that this function is easy to get inlined,
// then we shouldn't specialize it.
if (Metrics.notDuplicatable || !Metrics.NumInsts.isValid() ||
- (!ForceSpecialization && !F.hasFnAttribute(Attribute::NoInline) &&
- Metrics.NumInsts < MinFunctionSize))
+ (RequireMinSize && Metrics.NumInsts < MinFunctionSize))
continue;
// TODO: For now only consider recursive functions when running multiple
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 954c4cf..c8b9f16 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -4822,7 +4822,8 @@ bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
// We can only sink load instructions if there is nothing between the load and
// the end of block that could change the value.
- if (I->mayReadFromMemory()) {
+ if (I->mayReadFromMemory() &&
+ !I->hasMetadata(LLVMContext::MD_invariant_load)) {
// We don't want to do any sophisticated alias analysis, so we only check
// the instructions after I in I's parent block if we try to sink to its
// successor block.
diff --git a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 7bb4b55..c97a77d 100644
--- a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -1229,6 +1229,9 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
case LibFunc_logb:
case LibFunc_logbf:
case LibFunc_logbl:
+ case LibFunc_ilogb:
+ case LibFunc_ilogbf:
+ case LibFunc_ilogbl:
case LibFunc_logf:
case LibFunc_logl:
case LibFunc_nearbyint:
diff --git a/llvm/lib/Transforms/Utils/CtorUtils.cpp b/llvm/lib/Transforms/Utils/CtorUtils.cpp
index 507729b..968446c 100644
--- a/llvm/lib/Transforms/Utils/CtorUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CtorUtils.cpp
@@ -45,9 +45,9 @@ static void removeGlobalCtors(GlobalVariable *GCL, const BitVector &CtorsToRemov
}
// Create the new global and insert it next to the existing list.
- GlobalVariable *NGV =
- new GlobalVariable(CA->getType(), GCL->isConstant(), GCL->getLinkage(),
- CA, "", GCL->getThreadLocalMode());
+ GlobalVariable *NGV = new GlobalVariable(
+ CA->getType(), GCL->isConstant(), GCL->getLinkage(), CA, "",
+ GCL->getThreadLocalMode(), GCL->getAddressSpace());
GCL->getParent()->insertGlobalVariable(GCL->getIterator(), NGV);
NGV->takeName(GCL);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 7ded51d..c1b9779 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -1555,7 +1555,8 @@ VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
void VPSlotTracker::assignName(const VPValue *V) {
assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
auto *UV = V->getUnderlyingValue();
- if (!UV) {
+ auto *VPI = dyn_cast_or_null<VPInstruction>(V->getDefiningRecipe());
+ if (!UV && !(VPI && !VPI->getName().empty())) {
VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
NextSlot++;
return;
@@ -1564,10 +1565,15 @@ void VPSlotTracker::assignName(const VPValue *V) {
// Use the name of the underlying Value, wrapped in "ir<>", and versioned by
// appending ".Number" to the name if there are multiple uses.
std::string Name;
- raw_string_ostream S(Name);
- UV->printAsOperand(S, false);
+ if (UV) {
+ raw_string_ostream S(Name);
+ UV->printAsOperand(S, false);
+ } else
+ Name = VPI->getName();
+
assert(!Name.empty() && "Name cannot be empty.");
- std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();
+ StringRef Prefix = UV ? "ir<" : "vp<%";
+ std::string BaseName = (Twine(Prefix) + Name + Twine(">")).str();
// First assign the base name for V.
const auto &[A, _] = VPValue2Name.insert({V, BaseName});
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index fd97dda..59a0844 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1414,6 +1414,9 @@ public:
/// Returns true if this VPInstruction's operands are single scalars and the
/// result is also a single scalar.
bool isSingleScalar() const;
+
+ /// Returns the symbolic name assigned to the VPInstruction.
+ StringRef getName() const { return Name; }
};
/// A recipe to wrap on original IR instruction not to be modified during
diff --git a/llvm/test/Analysis/CostModel/RISCV/fixed-vector-insert-subvector.ll b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-insert-subvector.ll
new file mode 100644
index 0000000..47a2af9
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-insert-subvector.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: 'test'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %0 = shufflevector <8 x float> poison, <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %1 = shufflevector <4 x i16> poison, <4 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = shufflevector <4 x float> poison, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = shufflevector <2 x i1> poison, <2 x i1> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+entry:
+ %0 = shufflevector <8 x float> poison, <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %1 = shufflevector <4 x i16> poison, <4 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %2 = shufflevector <4 x float> poison, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = shufflevector <2 x i1> poison, <2 x i1> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ ret void
+}
diff --git a/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
index a2d171c..a97ed0a 100644
--- a/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
+++ b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
@@ -54,7 +54,7 @@ define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%bas
ret <vscale x 32 x i8> %res
}
-; ldN instrinsic name with only output type
+; ldN intrinsic name with only output type
define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK-LABEL: @ld2.nxv32i8_no_predty_pty
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
index ae04cc7..b045dee 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-cast.mir
@@ -135,20 +135,13 @@ name: test_combine_trunc_build_vector
legalized: true
body: |
bb.1:
- ; CHECK-PRE-LABEL: name: test_combine_trunc_build_vector
- ; CHECK-PRE: %arg1:_(s64) = COPY $x0
- ; CHECK-PRE-NEXT: %arg2:_(s64) = COPY $x0
- ; CHECK-PRE-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %arg1(s64)
- ; CHECK-PRE-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %arg2(s64)
- ; CHECK-PRE-NEXT: %small:_(<2 x s32>) = G_BUILD_VECTOR [[TRUNC]](s32), [[TRUNC1]](s32)
- ; CHECK-PRE-NEXT: $x0 = COPY %small(<2 x s32>)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_build_vector
- ; CHECK-POST: %arg1:_(s64) = COPY $x0
- ; CHECK-POST-NEXT: %arg2:_(s64) = COPY $x0
- ; CHECK-POST-NEXT: %bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
- ; CHECK-POST-NEXT: %small:_(<2 x s32>) = G_TRUNC %bv(<2 x s64>)
- ; CHECK-POST-NEXT: $x0 = COPY %small(<2 x s32>)
+ ; CHECK-LABEL: name: test_combine_trunc_build_vector
+ ; CHECK: %arg1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %arg2:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %arg1(s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %arg2(s64)
+ ; CHECK-NEXT: %small:_(<2 x s32>) = G_BUILD_VECTOR [[TRUNC]](s32), [[TRUNC1]](s32)
+ ; CHECK-NEXT: $x0 = COPY %small(<2 x s32>)
%arg1:_(s64) = COPY $x0
%arg2:_(s64) = COPY $x0
%bv:_(<2 x s64>) = G_BUILD_VECTOR %arg1(s64), %arg2(s64)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-trunc.mir
index 4a38b5d..9a2b9dd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-trunc.mir
@@ -32,20 +32,12 @@ legalized: true
body: |
bb.1:
liveins: $h0
- ; CHECK-PRE-LABEL: name: test_combine_trunc_anyext_s32_s16
- ; CHECK-PRE: liveins: $h0
- ; CHECK-PRE-NEXT: {{ $}}
- ; CHECK-PRE-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-PRE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; CHECK-PRE-NEXT: $w0 = COPY [[ANYEXT]](s32)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_anyext_s32_s16
- ; CHECK-POST: liveins: $h0
- ; CHECK-POST-NEXT: {{ $}}
- ; CHECK-POST-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-POST-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16)
- ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ANYEXT]](s64)
- ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-LABEL: name: test_combine_trunc_anyext_s32_s16
+ ; CHECK: liveins: $h0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
%0:_(s16) = COPY $h0
%1:_(s64) = G_ANYEXT %0(s16)
%2:_(s32) = G_TRUNC %1(s64)
@@ -82,20 +74,12 @@ legalized: true
body: |
bb.1:
liveins: $h0
- ; CHECK-PRE-LABEL: name: test_combine_trunc_sext_s32_s16
- ; CHECK-PRE: liveins: $h0
- ; CHECK-PRE-NEXT: {{ $}}
- ; CHECK-PRE-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-PRE-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
- ; CHECK-PRE-NEXT: $w0 = COPY [[SEXT]](s32)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_sext_s32_s16
- ; CHECK-POST: liveins: $h0
- ; CHECK-POST-NEXT: {{ $}}
- ; CHECK-POST-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-POST-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
- ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXT]](s64)
- ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-LABEL: name: test_combine_trunc_sext_s32_s16
+ ; CHECK: liveins: $h0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[SEXT]](s32)
%0:_(s16) = COPY $h0
%1:_(s64) = G_SEXT %0(s16)
%2:_(s32) = G_TRUNC %1(s64)
@@ -107,20 +91,12 @@ legalized: true
body: |
bb.1:
liveins: $h0
- ; CHECK-PRE-LABEL: name: test_combine_trunc_zext_s32_s16
- ; CHECK-PRE: liveins: $h0
- ; CHECK-PRE-NEXT: {{ $}}
- ; CHECK-PRE-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-PRE-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
- ; CHECK-PRE-NEXT: $w0 = COPY [[ZEXT]](s32)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_zext_s32_s16
- ; CHECK-POST: liveins: $h0
- ; CHECK-POST-NEXT: {{ $}}
- ; CHECK-POST-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
- ; CHECK-POST-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s16)
- ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s64)
- ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-LABEL: name: test_combine_trunc_zext_s32_s16
+ ; CHECK: liveins: $h0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
%0:_(s16) = COPY $h0
%1:_(s64) = G_ZEXT %0(s16)
%2:_(s32) = G_TRUNC %1(s64)
@@ -132,19 +108,11 @@ legalized: true
body: |
bb.1:
liveins: $w0
- ; CHECK-PRE-LABEL: name: test_combine_trunc_anyext_s32_s32
- ; CHECK-PRE: liveins: $w0
- ; CHECK-PRE-NEXT: {{ $}}
- ; CHECK-PRE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
- ; CHECK-PRE-NEXT: $w0 = COPY [[COPY]](s32)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_anyext_s32_s32
- ; CHECK-POST: liveins: $w0
- ; CHECK-POST-NEXT: {{ $}}
- ; CHECK-POST-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
- ; CHECK-POST-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32)
- ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ANYEXT]](s64)
- ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-LABEL: name: test_combine_trunc_anyext_s32_s32
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
%0:_(s32) = COPY $w0
%1:_(s64) = G_ANYEXT %0(s32)
%2:_(s32) = G_TRUNC %1(s64)
@@ -156,20 +124,12 @@ legalized: true
body: |
bb.1:
liveins: $x0
- ; CHECK-PRE-LABEL: name: test_combine_trunc_anyext_s32_s64
- ; CHECK-PRE: liveins: $x0
- ; CHECK-PRE-NEXT: {{ $}}
- ; CHECK-PRE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
- ; CHECK-PRE-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK-PRE-NEXT: $w0 = COPY [[TRUNC]](s32)
- ;
- ; CHECK-POST-LABEL: name: test_combine_trunc_anyext_s32_s64
- ; CHECK-POST: liveins: $x0
- ; CHECK-POST-NEXT: {{ $}}
- ; CHECK-POST-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
- ; CHECK-POST-NEXT: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]](s64)
- ; CHECK-POST-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ANYEXT]](s128)
- ; CHECK-POST-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-LABEL: name: test_combine_trunc_anyext_s32_s64
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
%0:_(s64) = COPY $x0
%1:_(s128) = G_ANYEXT %0(s64)
%2:_(s32) = G_TRUNC %1(s128)
diff --git a/llvm/test/CodeGen/AArch64/add.ll b/llvm/test/CodeGen/AArch64/add.ll
index fc1a0c7..ce7e310 100644
--- a/llvm/test/CodeGen/AArch64/add.ll
+++ b/llvm/test/CodeGen/AArch64/add.ll
@@ -171,11 +171,7 @@ define void @v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
index f005ca4..09f00b3 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
@@ -530,10 +530,10 @@ define i64 @test_2_selects(i8 zeroext %a) {
; CHECK-LABEL: test_2_selects:
; CHECK: ; %bb.0:
; CHECK-NEXT: add w9, w0, #24
-; CHECK-NEXT: mov w8, #131
+; CHECK-NEXT: mov w8, #131 ; =0x83
; CHECK-NEXT: and w9, w9, #0xff
; CHECK-NEXT: cmp w9, #81
-; CHECK-NEXT: mov w9, #57
+; CHECK-NEXT: mov w9, #57 ; =0x39
; CHECK-NEXT: csel x8, x8, xzr, lo
; CHECK-NEXT: csel x9, xzr, x9, eq
; CHECK-NEXT: add x0, x8, x9
diff --git a/llvm/test/CodeGen/AArch64/andorxor.ll b/llvm/test/CodeGen/AArch64/andorxor.ll
index 5385a917..459daec 100644
--- a/llvm/test/CodeGen/AArch64/andorxor.ll
+++ b/llvm/test/CodeGen/AArch64/andorxor.ll
@@ -463,11 +463,7 @@ define void @and_v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
@@ -514,11 +510,7 @@ define void @or_v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: orr v0.8b, v0.8b, v1.8b
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
@@ -565,11 +557,7 @@ define void @xor_v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: eor v0.8b, v0.8b, v1.8b
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/bitcast.ll b/llvm/test/CodeGen/AArch64/bitcast.ll
index 79cfeed..bbdf8b0 100644
--- a/llvm/test/CodeGen/AArch64/bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast.ll
@@ -60,11 +60,7 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
; CHECK-GI-LABEL: bitcast_v4i8_i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
@@ -116,9 +112,7 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a, <2 x i16> %b){
; CHECK-GI-LABEL: bitcast_v2i16_i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-GI-NEXT: mov v1.s[0], v0.s[0]
-; CHECK-GI-NEXT: mov v1.s[1], v0.s[1]
-; CHECK-GI-NEXT: xtn v0.4h, v1.4s
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
@@ -418,9 +412,7 @@ define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){
; CHECK-GI-LABEL: bitcast_v2i16_v4i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-GI-NEXT: mov v1.s[0], v0.s[0]
-; CHECK-GI-NEXT: mov v1.s[1], v0.s[1]
-; CHECK-GI-NEXT: xtn v0.4h, v1.4s
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: mov b1, v0.b[1]
; CHECK-GI-NEXT: mov v2.b[0], v0.b[0]
; CHECK-GI-NEXT: mov b3, v0.b[2]
@@ -455,11 +447,7 @@ define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){
; CHECK-GI-LABEL: bitcast_v4i8_v2i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: mov h1, v0.h[1]
; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index d800b25..0033999 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -33,18 +33,8 @@ define <8 x i8> @concat2(<4 x i8> %A, <4 x i8> %B) {
;
; CHECK-GI-LABEL: concat2:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov v2.h[0], v0.h[0]
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov v3.h[0], v1.h[0]
-; CHECK-GI-NEXT: mov v2.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v3.h[1], v1.h[1]
-; CHECK-GI-NEXT: mov v2.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v3.h[2], v1.h[2]
-; CHECK-GI-NEXT: mov v2.h[3], v0.h[3]
-; CHECK-GI-NEXT: mov v3.h[3], v1.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v2.8h
-; CHECK-GI-NEXT: xtn v1.8b, v3.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: mov v0.s[0], w8
; CHECK-GI-NEXT: fmov w8, s1
@@ -74,15 +64,9 @@ define <4 x i16> @concat4(<2 x i16> %A, <2 x i16> %B) {
;
; CHECK-GI-LABEL: concat4:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov v2.s[0], v0.s[0]
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov v2.s[1], v0.s[1]
-; CHECK-GI-NEXT: mov v0.s[0], v1.s[0]
-; CHECK-GI-NEXT: xtn v2.4h, v2.4s
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[1]
-; CHECK-GI-NEXT: xtn v1.4h, v0.4s
-; CHECK-GI-NEXT: fmov w8, s2
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: mov v0.s[0], w8
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: mov v0.s[1], w8
@@ -183,12 +167,11 @@ define <8 x i16> @concat_v8s16_v2s16(ptr %ptr) {
;
; CHECK-GI-LABEL: concat_v8s16_v2s16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr h0, [x0]
-; CHECK-GI-NEXT: ldr h1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
-; CHECK-GI-NEXT: xtn v0.4h, v0.4s
-; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: mov v0.s[0], w8
+; CHECK-GI-NEXT: ldrh w8, [x0]
+; CHECK-GI-NEXT: ldrh w9, [x0, #2]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: mov v0.s[0], v1.s[0]
; CHECK-GI-NEXT: ret
%a = load <2 x i16>, ptr %ptr
%b = shufflevector <2 x i16> %a, <2 x i16> %a, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -238,34 +221,14 @@ define <16 x i8> @concat_v16s8_v4s8_reg(<4 x i8> %A, <4 x i8> %B, <4 x i8> %C, <
;
; CHECK-GI-LABEL: concat_v16s8_v4s8_reg:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov v4.h[0], v0.h[0]
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov v5.h[0], v1.h[0]
-; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: mov v6.h[0], v2.h[0]
-; CHECK-GI-NEXT: mov v7.h[0], v3.h[0]
-; CHECK-GI-NEXT: mov v4.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v5.h[1], v1.h[1]
-; CHECK-GI-NEXT: mov v6.h[1], v2.h[1]
-; CHECK-GI-NEXT: mov v7.h[1], v3.h[1]
-; CHECK-GI-NEXT: mov v4.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v5.h[2], v1.h[2]
-; CHECK-GI-NEXT: mov v6.h[2], v2.h[2]
-; CHECK-GI-NEXT: mov v7.h[2], v3.h[2]
-; CHECK-GI-NEXT: mov v4.h[3], v0.h[3]
-; CHECK-GI-NEXT: mov v5.h[3], v1.h[3]
-; CHECK-GI-NEXT: mov v6.h[3], v2.h[3]
-; CHECK-GI-NEXT: mov v7.h[3], v3.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v4.8h
-; CHECK-GI-NEXT: xtn v1.8b, v5.8h
-; CHECK-GI-NEXT: xtn v2.8b, v6.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: mov v0.s[0], w8
; CHECK-GI-NEXT: fmov w8, s1
-; CHECK-GI-NEXT: xtn v1.8b, v7.8h
+; CHECK-GI-NEXT: uzp1 v2.8b, v2.8b, v0.8b
; CHECK-GI-NEXT: mov v0.s[1], w8
+; CHECK-GI-NEXT: uzp1 v1.8b, v3.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s2
; CHECK-GI-NEXT: mov v0.s[2], w8
; CHECK-GI-NEXT: fmov w8, s1
@@ -291,29 +254,17 @@ define <8 x i16> @concat_v8s16_v2s16_reg(<2 x i16> %A, <2 x i16> %B, <2 x i16> %
;
; CHECK-GI-LABEL: concat_v8s16_v2s16_reg:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov v4.s[0], v0.s[0]
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov v5.s[0], v1.s[0]
-; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: mov v4.s[1], v0.s[1]
-; CHECK-GI-NEXT: mov v5.s[1], v1.s[1]
-; CHECK-GI-NEXT: mov v1.s[0], v2.s[0]
-; CHECK-GI-NEXT: xtn v0.4h, v4.4s
-; CHECK-GI-NEXT: xtn v4.4h, v5.4s
-; CHECK-GI-NEXT: mov v1.s[1], v2.s[1]
-; CHECK-GI-NEXT: mov v2.s[0], v3.s[0]
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: xtn v1.4h, v1.4s
-; CHECK-GI-NEXT: mov v2.s[1], v3.s[1]
; CHECK-GI-NEXT: mov v0.s[0], w8
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: xtn v2.4h, v2.4s
-; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: fmov w8, s1
-; CHECK-GI-NEXT: mov v0.s[2], w8
+; CHECK-GI-NEXT: uzp1 v2.4h, v2.4h, v0.4h
+; CHECK-GI-NEXT: mov v0.s[1], w8
+; CHECK-GI-NEXT: uzp1 v1.4h, v3.4h, v0.4h
; CHECK-GI-NEXT: fmov w8, s2
+; CHECK-GI-NEXT: mov v0.s[2], w8
+; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: mov v0.s[3], w8
; CHECK-GI-NEXT: ret
%b = shufflevector <2 x i16> %A, <2 x i16> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index baab53d..66f26fc 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -922,26 +922,27 @@ define <3 x i32> @v3f64_i32(<3 x double> %a, <3 x double> %b, <3 x i32> %d, <3 x
; CHECK-GI-LABEL: v3f64_i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: mov w8, #31 // =0x1f
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d4 killed $d4 def $q4
+; CHECK-GI-NEXT: mov w8, #31 // =0x1f
; CHECK-GI-NEXT: fcmp d2, d5
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: mov v3.d[1], v4.d[0]
; CHECK-GI-NEXT: mov v1.s[0], w8
; CHECK-GI-NEXT: cset w9, mi
-; CHECK-GI-NEXT: mov v2.d[0], x9
+; CHECK-GI-NEXT: mov v2.s[0], w9
; CHECK-GI-NEXT: mov w9, #-1 // =0xffffffff
; CHECK-GI-NEXT: fcmgt v0.2d, v3.2d, v0.2d
; CHECK-GI-NEXT: mov v1.s[1], w8
; CHECK-GI-NEXT: mov v3.s[0], w9
+; CHECK-GI-NEXT: xtn v0.2s, v0.2d
; CHECK-GI-NEXT: mov v1.s[2], w8
-; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v2.4s
; CHECK-GI-NEXT: mov v3.s[1], w9
+; CHECK-GI-NEXT: mov v0.d[1], v2.d[0]
+; CHECK-GI-NEXT: mov v3.s[2], w9
; CHECK-GI-NEXT: ushl v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: neg v1.4s, v1.4s
-; CHECK-GI-NEXT: mov v3.s[2], w9
; CHECK-GI-NEXT: sshl v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: eor v1.16b, v0.16b, v3.16b
; CHECK-GI-NEXT: and v0.16b, v6.16b, v0.16b
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index c5bde81..81c1a64 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -7937,10 +7937,7 @@ define <2 x half> @stofp_v2i8_v2f16(<2 x i8> %a) {
;
; CHECK-GI-FP16-LABEL: stofp_v2i8_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov v1.s[0], v0.s[0]
-; CHECK-GI-FP16-NEXT: mov v1.s[1], v0.s[1]
-; CHECK-GI-FP16-NEXT: xtn v0.4h, v1.4s
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
diff --git a/llvm/test/CodeGen/AArch64/mul.ll b/llvm/test/CodeGen/AArch64/mul.ll
index 9e748c9..5e7f71c 100644
--- a/llvm/test/CodeGen/AArch64/mul.ll
+++ b/llvm/test/CodeGen/AArch64/mul.ll
@@ -183,11 +183,7 @@ define void @v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/AArch64/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..fb55986
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple aarch64 | FileCheck %s -check-prefixes=CHECK-LE
+; RUN: llc < %s -mtriple aarch64_be | FileCheck %s -check-prefixes=CHECK-BE
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LE-LABEL: naked:
+; CHECK-LE: // %bb.0:
+; CHECK-LE-NEXT: bl main
+;
+; CHECK-BE-LABEL: naked:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: bl main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LE-LABEL: normal:
+; CHECK-LE: // %bb.0:
+; CHECK-LE-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-LE-NEXT: mov x29, sp
+; CHECK-LE-NEXT: .cfi_def_cfa w29, 16
+; CHECK-LE-NEXT: .cfi_offset w30, -8
+; CHECK-LE-NEXT: .cfi_offset w29, -16
+; CHECK-LE-NEXT: bl main
+;
+; CHECK-BE-LABEL: normal:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-BE-NEXT: mov x29, sp
+; CHECK-BE-NEXT: .cfi_def_cfa w29, 16
+; CHECK-BE-NEXT: .cfi_offset w30, -8
+; CHECK-BE-NEXT: .cfi_offset w29, -16
+; CHECK-BE-NEXT: bl main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll
new file mode 100644
index 0000000..fbd7779
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple aarch64-linux-gnu -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=ELF %s
+; RUN: llc -mtriple aarch64-apple-darwin -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=MACHO %s
+
+; ELF-LABEL: _ZTI10Disc:
+; ELF-NEXT: .xword (_ZTVN10__cxxabiv117__class_type_infoE+16)@AUTH(da,45546,addr)
+; ELF-LABEL: _ZTI10NoDisc:
+; ELF-NEXT: .xword (_ZTVN10__cxxabiv117__class_type_infoE+16)@AUTH(da,45546)
+
+; MACHO-LABEL: __ZTI10Disc:
+; MACHO-NEXT: .quad (__ZTVN10__cxxabiv117__class_type_infoE+16)@AUTH(da,45546,addr)
+; MACHO-LABEL: __ZTI10NoDisc:
+; MACHO-NEXT: .quad (__ZTVN10__cxxabiv117__class_type_infoE+16)@AUTH(da,45546)
+
+
+@_ZTI10Disc = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2, i64 45546, ptr @_ZTI10Disc), ptr @_ZTS10Disc }, align 8
+@_ZTS10Disc = constant [4 x i8] c"Disc", align 1
+
+@_ZTI10NoDisc = constant { ptr, ptr } { ptr ptrauth (ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), i32 2, i64 45546), ptr @_ZTS10NoDisc }, align 8
+@_ZTS10NoDisc = constant [6 x i8] c"NoDisc", align 1
+
+@_ZTVN10__cxxabiv117__class_type_infoE = external global [0 x ptr]
diff --git a/llvm/test/CodeGen/AArch64/qshrn.ll b/llvm/test/CodeGen/AArch64/qshrn.ll
new file mode 100644
index 0000000..eaba88d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/qshrn.ll
@@ -0,0 +1,383 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc %s -mtriple=aarch64 -o - | FileCheck %s
+
+define <4 x i16> @NarrowAShrI32By5(<4 x i32> %x) {
+; CHECK-LABEL: NarrowAShrI32By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.4s, v0.4s, #5
+; CHECK-NEXT: sqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowAShrU32By5(<4 x i32> %x) {
+; CHECK-LABEL: NarrowAShrU32By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.4s, v0.4s, #5
+; CHECK-NEXT: uqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowAShrI32By5ToU16(<4 x i32> %x) {
+; CHECK-LABEL: NarrowAShrI32By5ToU16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.4s, v0.4s, #5
+; CHECK-NEXT: sqxtun v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowLShrI32By5(<4 x i32> %x) {
+; CHECK-LABEL: NarrowLShrI32By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.4s, v0.4s, #5
+; CHECK-NEXT: sqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowLShrU32By5(<4 x i32> %x) {
+; CHECK-LABEL: NarrowLShrU32By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.4s, v0.4s, #5
+; CHECK-NEXT: uqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowLShrI32By5ToU16(<4 x i32> %x) {
+; CHECK-LABEL: NarrowLShrI32By5ToU16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.4s, v0.4s, #5
+; CHECK-NEXT: sqxtun v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+
+define <2 x i32> @NarrowAShri64By5(<2 x i64> %x) {
+; CHECK-LABEL: NarrowAShri64By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2d, v0.2d, #5
+; CHECK-NEXT: sqxtn v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = ashr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @NarrowAShrU64By5(<2 x i64> %x) {
+; CHECK-LABEL: NarrowAShrU64By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2d, v0.2d, #5
+; CHECK-NEXT: uqxtn v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = ashr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @NarrowAShri64By5ToU32(<2 x i64> %x) {
+; CHECK-LABEL: NarrowAShri64By5ToU32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2d, v0.2d, #5
+; CHECK-NEXT: sqxtun v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = ashr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @NarrowLShri64By5(<2 x i64> %x) {
+; CHECK-LABEL: NarrowLShri64By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.2d, v0.2d, #5
+; CHECK-NEXT: sqxtn v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = lshr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @NarrowLShrU64By5(<2 x i64> %x) {
+; CHECK-LABEL: NarrowLShrU64By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.2d, v0.2d, #5
+; CHECK-NEXT: uqxtn v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = lshr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @NarrowLShri64By5ToU32(<2 x i64> %x) {
+; CHECK-LABEL: NarrowLShri64By5ToU32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.2d, v0.2d, #5
+; CHECK-NEXT: sqxtun v0.2s, v0.2d
+; CHECK-NEXT: ret
+ %s = lshr <2 x i64> %x, <i64 5, i64 5>
+ %r = tail call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %s)
+ ret <2 x i32> %r
+}
+
+
+define <8 x i8> @NarrowAShri16By5(<8 x i16> %x) {
+; CHECK-LABEL: NarrowAShri16By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.8h, v0.8h, #5
+; CHECK-NEXT: sqxtn v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = ashr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @NarrowAShrU16By5(<8 x i16> %x) {
+; CHECK-LABEL: NarrowAShrU16By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.8h, v0.8h, #5
+; CHECK-NEXT: uqxtn v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = ashr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @NarrowAShri16By5ToU8(<8 x i16> %x) {
+; CHECK-LABEL: NarrowAShri16By5ToU8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.8h, v0.8h, #5
+; CHECK-NEXT: sqxtun v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = ashr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @NarrowLShri16By5(<8 x i16> %x) {
+; CHECK-LABEL: NarrowLShri16By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.8h, v0.8h, #5
+; CHECK-NEXT: sqxtn v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = lshr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @NarrowLShrU16By5(<8 x i16> %x) {
+; CHECK-LABEL: NarrowLShrU16By5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.8h, v0.8h, #5
+; CHECK-NEXT: uqxtn v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = lshr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @NarrowLShri16By5ToU8(<8 x i16> %x) {
+; CHECK-LABEL: NarrowLShri16By5ToU8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.8h, v0.8h, #5
+; CHECK-NEXT: sqxtun v0.8b, v0.8h
+; CHECK-NEXT: ret
+ %s = lshr <8 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %r = tail call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %s)
+ ret <8 x i8> %r
+}
+
+
+
+
+
+define <4 x i16> @NarrowAShrI32By31(<4 x i32> %x) {
+; CHECK-LABEL: NarrowAShrI32By31:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.4s, v0.4s, #16
+; CHECK-NEXT: sqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = ashr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowAShrI32By31ToU16(<4 x i32> %x) {
+; CHECK-LABEL: NarrowAShrI32By31ToU16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.4s, v0.4s, #16
+; CHECK-NEXT: sqxtun v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = ashr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @NarrowLShrU32By31(<4 x i32> %x) {
+; CHECK-LABEL: NarrowLShrU32By31:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushr v0.4s, v0.4s, #16
+; CHECK-NEXT: uqxtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %s = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
+ %r = tail call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %s)
+ ret <4 x i16> %r
+}
+
+
+define <16 x i8> @signed_minmax_v8i16_to_v16i8(<16 x i16> %x) {
+; CHECK-LABEL: signed_minmax_v8i16_to_v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.8h, v0.8h, #5
+; CHECK-NEXT: sshr v1.8h, v1.8h, #5
+; CHECK-NEXT: sqxtn v0.8b, v0.8h
+; CHECK-NEXT: sqxtn2 v0.16b, v1.8h
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <16 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %min = call <16 x i16> @llvm.smin.v8i16(<16 x i16> %s, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>)
+ %max = call <16 x i16> @llvm.smax.v8i16(<16 x i16> %min, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>)
+ %trunc = trunc <16 x i16> %max to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <16 x i8> @unsigned_minmax_v8i16_to_v16i8(<16 x i16> %x) {
+; CHECK-LABEL: unsigned_minmax_v8i16_to_v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushr v0.8h, v0.8h, #5
+; CHECK-NEXT: ushr v1.8h, v1.8h, #5
+; CHECK-NEXT: uqxtn v0.8b, v0.8h
+; CHECK-NEXT: uqxtn2 v0.16b, v1.8h
+; CHECK-NEXT: ret
+entry:
+ %s = lshr <16 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %min = call <16 x i16> @llvm.umin.v8i16(<16 x i16> %s, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>)
+ %trunc = trunc <16 x i16> %min to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <16 x i8> @unsigned_signed_minmax_v8i16_to_v16i8(<16 x i16> %x) {
+; CHECK-LABEL: unsigned_signed_minmax_v8i16_to_v16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.8h, v0.8h, #5
+; CHECK-NEXT: sshr v1.8h, v1.8h, #5
+; CHECK-NEXT: sqxtun v0.8b, v0.8h
+; CHECK-NEXT: sqxtun2 v0.16b, v1.8h
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <16 x i16> %x, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %max = call <16 x i16> @llvm.smax.v8i16(<16 x i16> %s, <16 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+ %min = call <16 x i16> @llvm.umin.v8i16(<16 x i16> %max, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>)
+ %trunc = trunc <16 x i16> %min to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+
+define <8 x i16> @signed_minmax_v4i32_to_v8i16(<8 x i32> %x) {
+; CHECK-LABEL: signed_minmax_v4i32_to_v8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.4s, v0.4s, #5
+; CHECK-NEXT: sshr v1.4s, v1.4s, #5
+; CHECK-NEXT: sqxtn v0.4h, v0.4s
+; CHECK-NEXT: sqxtn2 v0.8h, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <8 x i32> %x, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ %min = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %s, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+ %max = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %min, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
+ %trunc = trunc <8 x i32> %max to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @unsigned_minmax_v4i32_to_v8i16(<8 x i32> %x) {
+; CHECK-LABEL: unsigned_minmax_v4i32_to_v8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushr v0.4s, v0.4s, #5
+; CHECK-NEXT: ushr v1.4s, v1.4s, #5
+; CHECK-NEXT: uqxtn v0.4h, v0.4s
+; CHECK-NEXT: uqxtn2 v0.8h, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %s = lshr <8 x i32> %x, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ %min = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %s, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
+ %trunc = trunc <8 x i32> %min to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @unsigned_signed_minmax_v4i32_to_v8i16(<8 x i32> %x) {
+; CHECK-LABEL: unsigned_signed_minmax_v4i32_to_v8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.4s, v0.4s, #5
+; CHECK-NEXT: sshr v1.4s, v1.4s, #5
+; CHECK-NEXT: sqxtun v0.4h, v0.4s
+; CHECK-NEXT: sqxtun2 v0.8h, v1.4s
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <8 x i32> %x, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ %max = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %s, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>)
+ %min = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %max, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
+ %trunc = trunc <8 x i32> %min to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+
+define <4 x i32> @signed_minmax_v4i64_to_v8i32(<4 x i64> %x) {
+; CHECK-LABEL: signed_minmax_v4i64_to_v8i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.2d, v0.2d, #5
+; CHECK-NEXT: sshr v1.2d, v1.2d, #5
+; CHECK-NEXT: sqxtn v0.2s, v0.2d
+; CHECK-NEXT: sqxtn2 v0.4s, v1.2d
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <4 x i64> %x, <i64 5, i64 5, i64 5, i64 5>
+ %min = call <4 x i64> @llvm.smin.v8i64(<4 x i64> %s, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+ %max = call <4 x i64> @llvm.smax.v8i64(<4 x i64> %min, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
+ %trunc = trunc <4 x i64> %max to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @unsigned_minmax_v4i64_to_v8i32(<4 x i64> %x) {
+; CHECK-LABEL: unsigned_minmax_v4i64_to_v8i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ushr v0.2d, v0.2d, #5
+; CHECK-NEXT: ushr v1.2d, v1.2d, #5
+; CHECK-NEXT: uqxtn v0.2s, v0.2d
+; CHECK-NEXT: uqxtn2 v0.4s, v1.2d
+; CHECK-NEXT: ret
+entry:
+ %s = lshr <4 x i64> %x, <i64 5, i64 5, i64 5, i64 5>
+ %min = call <4 x i64> @llvm.umin.v8i64(<4 x i64> %s, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
+ %trunc = trunc <4 x i64> %min to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @unsigned_signed_minmax_v4i64_to_v8i32(<4 x i64> %x) {
+; CHECK-LABEL: unsigned_signed_minmax_v4i64_to_v8i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sshr v0.2d, v0.2d, #5
+; CHECK-NEXT: sshr v1.2d, v1.2d, #5
+; CHECK-NEXT: sqxtun v0.2s, v0.2d
+; CHECK-NEXT: sqxtun2 v0.4s, v1.2d
+; CHECK-NEXT: ret
+entry:
+ %s = ashr <4 x i64> %x, <i64 5, i64 5, i64 5, i64 5>
+ %max = call <4 x i64> @llvm.smax.v8i64(<4 x i64> %s, <4 x i64> <i64 0, i64 0, i64 0, i64 0>)
+ %min = call <4 x i64> @llvm.umin.v8i64(<4 x i64> %max, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
+ %trunc = trunc <4 x i64> %min to <4 x i32>
+ ret <4 x i32> %trunc
+}
diff --git a/llvm/test/CodeGen/AArch64/sub.ll b/llvm/test/CodeGen/AArch64/sub.ll
index 8e7586b..c298e6d 100644
--- a/llvm/test/CodeGen/AArch64/sub.ll
+++ b/llvm/test/CodeGen/AArch64/sub.ll
@@ -171,11 +171,7 @@ define void @v4i8(ptr %p1, ptr %p2) {
; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
; CHECK-GI-NEXT: sub v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: mov v1.h[0], v0.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v0.h[1]
-; CHECK-GI-NEXT: mov v1.h[2], v0.h[2]
-; CHECK-GI-NEXT: mov v1.h[3], v0.h[3]
-; CHECK-GI-NEXT: xtn v0.8b, v1.8h
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: str w8, [x0]
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
index 276f237..20659cd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
@@ -140,98 +140,65 @@ define <8 x i8> @shuffle_index_indices_from_both_ops(ptr %a, ptr %b) {
;
; SVE2_128_NOMAX-LABEL: shuffle_index_indices_from_both_ops:
; SVE2_128_NOMAX: // %bb.0:
-; SVE2_128_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_128_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_128_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_128_NOMAX-NEXT: mov z1.b, z0.b[7]
-; SVE2_128_NOMAX-NEXT: mov z2.b, z0.b[6]
-; SVE2_128_NOMAX-NEXT: mov z3.b, z0.b[4]
-; SVE2_128_NOMAX-NEXT: fmov w8, s1
; SVE2_128_NOMAX-NEXT: ldr d1, [x0]
-; SVE2_128_NOMAX-NEXT: fmov w9, s2
; SVE2_128_NOMAX-NEXT: mov z2.b, z0.b[3]
-; SVE2_128_NOMAX-NEXT: mov z1.b, z1.b[1]
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #15]
-; SVE2_128_NOMAX-NEXT: fmov w8, s3
; SVE2_128_NOMAX-NEXT: mov z3.b, z0.b[2]
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #14]
-; SVE2_128_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_128_NOMAX-NEXT: fmov w9, s2
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #13]
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #12]
-; SVE2_128_NOMAX-NEXT: fmov w8, s3
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #11]
-; SVE2_128_NOMAX-NEXT: fmov w9, s0
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #10]
-; SVE2_128_NOMAX-NEXT: fmov w8, s1
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #9]
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #8]
-; SVE2_128_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_128_NOMAX-NEXT: add sp, sp, #16
+; SVE2_128_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_128_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_128_NOMAX-NEXT: mov z5.b, z0.b[7]
+; SVE2_128_NOMAX-NEXT: mov z6.b, z0.b[6]
+; SVE2_128_NOMAX-NEXT: mov z0.b, z0.b[4]
+; SVE2_128_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_128_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_128_NOMAX-NEXT: zip1 z3.b, z6.b, z5.b
+; SVE2_128_NOMAX-NEXT: zip1 z0.b, z0.b, z0.b
+; SVE2_128_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_128_NOMAX-NEXT: zip1 z0.h, z0.h, z3.h
+; SVE2_128_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_128_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_128_NOMAX-NEXT: ret
;
; SVE2_NOMIN_NOMAX-LABEL: shuffle_index_indices_from_both_ops:
; SVE2_NOMIN_NOMAX: // %bb.0:
-; SVE2_NOMIN_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_NOMIN_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_NOMIN_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z0.b[7]
-; SVE2_NOMIN_NOMAX-NEXT: mov z2.b, z0.b[6]
-; SVE2_NOMIN_NOMAX-NEXT: mov z3.b, z0.b[4]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s1
; SVE2_NOMIN_NOMAX-NEXT: ldr d1, [x0]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s2
; SVE2_NOMIN_NOMAX-NEXT: mov z2.b, z0.b[3]
-; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z1.b[1]
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #15]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s3
; SVE2_NOMIN_NOMAX-NEXT: mov z3.b, z0.b[2]
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #14]
-; SVE2_NOMIN_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s2
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #13]
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #12]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s3
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #11]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s0
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #10]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s1
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #9]
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #8]
-; SVE2_NOMIN_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_NOMIN_NOMAX-NEXT: add sp, sp, #16
+; SVE2_NOMIN_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_NOMIN_NOMAX-NEXT: mov z5.b, z0.b[7]
+; SVE2_NOMIN_NOMAX-NEXT: mov z6.b, z0.b[6]
+; SVE2_NOMIN_NOMAX-NEXT: mov z0.b, z0.b[4]
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z3.b, z6.b, z5.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z0.b, z0.b, z0.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z0.h, z0.h, z3.h
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_NOMIN_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_NOMIN_NOMAX-NEXT: ret
;
; SVE2_MIN_256_NOMAX-LABEL: shuffle_index_indices_from_both_ops:
; SVE2_MIN_256_NOMAX: // %bb.0:
-; SVE2_MIN_256_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_MIN_256_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_MIN_256_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z0.b[7]
-; SVE2_MIN_256_NOMAX-NEXT: mov z2.b, z0.b[6]
-; SVE2_MIN_256_NOMAX-NEXT: mov z3.b, z0.b[4]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s1
; SVE2_MIN_256_NOMAX-NEXT: ldr d1, [x0]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s2
; SVE2_MIN_256_NOMAX-NEXT: mov z2.b, z0.b[3]
-; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z1.b[1]
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #15]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s3
; SVE2_MIN_256_NOMAX-NEXT: mov z3.b, z0.b[2]
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #14]
-; SVE2_MIN_256_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s2
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #13]
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #12]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s3
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #11]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s0
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #10]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s1
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #9]
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #8]
-; SVE2_MIN_256_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_MIN_256_NOMAX-NEXT: add sp, sp, #16
+; SVE2_MIN_256_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_MIN_256_NOMAX-NEXT: mov z5.b, z0.b[7]
+; SVE2_MIN_256_NOMAX-NEXT: mov z6.b, z0.b[6]
+; SVE2_MIN_256_NOMAX-NEXT: mov z0.b, z0.b[4]
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z3.b, z6.b, z5.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z0.b, z0.b, z0.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z0.h, z0.h, z3.h
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_MIN_256_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_MIN_256_NOMAX-NEXT: ret
%op1 = load <8 x i8>, ptr %a
%op2 = load <8 x i8>, ptr %b
@@ -263,89 +230,59 @@ define <8 x i8> @shuffle_index_poison_value(ptr %a, ptr %b) {
;
; SVE2_128_NOMAX-LABEL: shuffle_index_poison_value:
; SVE2_128_NOMAX: // %bb.0:
-; SVE2_128_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_128_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_128_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_128_NOMAX-NEXT: ldr d3, [x0]
-; SVE2_128_NOMAX-NEXT: mov z1.b, z0.b[6]
-; SVE2_128_NOMAX-NEXT: mov z2.b, z0.b[4]
-; SVE2_128_NOMAX-NEXT: fmov w8, s1
-; SVE2_128_NOMAX-NEXT: mov z1.b, z0.b[3]
-; SVE2_128_NOMAX-NEXT: fmov w9, s2
-; SVE2_128_NOMAX-NEXT: mov z2.b, z0.b[2]
-; SVE2_128_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #14]
-; SVE2_128_NOMAX-NEXT: fmov w8, s1
-; SVE2_128_NOMAX-NEXT: mov z1.b, z3.b[1]
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #13]
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #12]
-; SVE2_128_NOMAX-NEXT: fmov w9, s2
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #11]
-; SVE2_128_NOMAX-NEXT: fmov w8, s0
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #10]
-; SVE2_128_NOMAX-NEXT: fmov w9, s1
-; SVE2_128_NOMAX-NEXT: strb w8, [sp, #9]
-; SVE2_128_NOMAX-NEXT: strb w9, [sp, #8]
-; SVE2_128_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_128_NOMAX-NEXT: add sp, sp, #16
+; SVE2_128_NOMAX-NEXT: ldr d1, [x0]
+; SVE2_128_NOMAX-NEXT: mov z2.b, z0.b[3]
+; SVE2_128_NOMAX-NEXT: mov z3.b, z0.b[2]
+; SVE2_128_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_128_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_128_NOMAX-NEXT: mov z5.b, z0.b[4]
+; SVE2_128_NOMAX-NEXT: mov z0.b, z0.b[6]
+; SVE2_128_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_128_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_128_NOMAX-NEXT: zip1 z3.b, z5.b, z5.b
+; SVE2_128_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_128_NOMAX-NEXT: zip1 z0.h, z3.h, z0.h
+; SVE2_128_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_128_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_128_NOMAX-NEXT: ret
;
; SVE2_NOMIN_NOMAX-LABEL: shuffle_index_poison_value:
; SVE2_NOMIN_NOMAX: // %bb.0:
-; SVE2_NOMIN_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_NOMIN_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_NOMIN_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_NOMIN_NOMAX-NEXT: ldr d3, [x0]
-; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z0.b[6]
-; SVE2_NOMIN_NOMAX-NEXT: mov z2.b, z0.b[4]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s1
-; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z0.b[3]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s2
-; SVE2_NOMIN_NOMAX-NEXT: mov z2.b, z0.b[2]
-; SVE2_NOMIN_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #14]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s1
-; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z3.b[1]
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #13]
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #12]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s2
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #11]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w8, s0
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #10]
-; SVE2_NOMIN_NOMAX-NEXT: fmov w9, s1
-; SVE2_NOMIN_NOMAX-NEXT: strb w8, [sp, #9]
-; SVE2_NOMIN_NOMAX-NEXT: strb w9, [sp, #8]
-; SVE2_NOMIN_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_NOMIN_NOMAX-NEXT: add sp, sp, #16
+; SVE2_NOMIN_NOMAX-NEXT: ldr d1, [x0]
+; SVE2_NOMIN_NOMAX-NEXT: mov z2.b, z0.b[3]
+; SVE2_NOMIN_NOMAX-NEXT: mov z3.b, z0.b[2]
+; SVE2_NOMIN_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_NOMIN_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_NOMIN_NOMAX-NEXT: mov z5.b, z0.b[4]
+; SVE2_NOMIN_NOMAX-NEXT: mov z0.b, z0.b[6]
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z3.b, z5.b, z5.b
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z0.h, z3.h, z0.h
+; SVE2_NOMIN_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_NOMIN_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_NOMIN_NOMAX-NEXT: ret
;
; SVE2_MIN_256_NOMAX-LABEL: shuffle_index_poison_value:
; SVE2_MIN_256_NOMAX: // %bb.0:
-; SVE2_MIN_256_NOMAX-NEXT: sub sp, sp, #16
-; SVE2_MIN_256_NOMAX-NEXT: .cfi_def_cfa_offset 16
; SVE2_MIN_256_NOMAX-NEXT: ldr d0, [x1]
-; SVE2_MIN_256_NOMAX-NEXT: ldr d3, [x0]
-; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z0.b[6]
-; SVE2_MIN_256_NOMAX-NEXT: mov z2.b, z0.b[4]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s1
-; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z0.b[3]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s2
-; SVE2_MIN_256_NOMAX-NEXT: mov z2.b, z0.b[2]
-; SVE2_MIN_256_NOMAX-NEXT: mov z0.b, z0.b[1]
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #14]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s1
-; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z3.b[1]
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #13]
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #12]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s2
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #11]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w8, s0
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #10]
-; SVE2_MIN_256_NOMAX-NEXT: fmov w9, s1
-; SVE2_MIN_256_NOMAX-NEXT: strb w8, [sp, #9]
-; SVE2_MIN_256_NOMAX-NEXT: strb w9, [sp, #8]
-; SVE2_MIN_256_NOMAX-NEXT: ldr d0, [sp, #8]
-; SVE2_MIN_256_NOMAX-NEXT: add sp, sp, #16
+; SVE2_MIN_256_NOMAX-NEXT: ldr d1, [x0]
+; SVE2_MIN_256_NOMAX-NEXT: mov z2.b, z0.b[3]
+; SVE2_MIN_256_NOMAX-NEXT: mov z3.b, z0.b[2]
+; SVE2_MIN_256_NOMAX-NEXT: mov z4.b, z0.b[1]
+; SVE2_MIN_256_NOMAX-NEXT: mov z1.b, z1.b[1]
+; SVE2_MIN_256_NOMAX-NEXT: mov z5.b, z0.b[4]
+; SVE2_MIN_256_NOMAX-NEXT: mov z0.b, z0.b[6]
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z2.b, z3.b, z2.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z1.b, z1.b, z4.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z3.b, z5.b, z5.b
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z0.h, z3.h, z0.h
+; SVE2_MIN_256_NOMAX-NEXT: zip1 z0.s, z1.s, z0.s
+; SVE2_MIN_256_NOMAX-NEXT: // kill: def $d0 killed $d0 killed $z0
; SVE2_MIN_256_NOMAX-NEXT: ret
%op1 = load <8 x i8>, ptr %a
%op2 = load <8 x i8>, ptr %b
@@ -401,34 +338,23 @@ define <8 x i8> @shuffle_op1_poison(ptr %a, ptr %b) {
define <8 x i8> @negative_test_shuffle_index_size_op_both_maxhw(ptr %a, ptr %b) "target-features"="+sve2" vscale_range(16,16) {
; CHECK-LABEL: negative_test_shuffle_index_size_op_both_maxhw:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr d0, [x1]
-; CHECK-NEXT: mov z1.b, z0.b[7]
-; CHECK-NEXT: mov z2.b, z0.b[6]
-; CHECK-NEXT: mov z3.b, z0.b[4]
-; CHECK-NEXT: fmov w8, s1
; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: fmov w9, s2
; CHECK-NEXT: mov z2.b, z0.b[3]
-; CHECK-NEXT: mov z1.b, z1.b[1]
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: fmov w8, s3
; CHECK-NEXT: mov z3.b, z0.b[2]
-; CHECK-NEXT: strb w9, [sp, #14]
-; CHECK-NEXT: mov z0.b, z0.b[1]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: strb w8, [sp, #13]
-; CHECK-NEXT: strb w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strb w9, [sp, #11]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strb w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strb w9, [sp, #9]
-; CHECK-NEXT: strb w8, [sp, #8]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: mov z4.b, z0.b[1]
+; CHECK-NEXT: mov z1.b, z1.b[1]
+; CHECK-NEXT: mov z5.b, z0.b[7]
+; CHECK-NEXT: mov z6.b, z0.b[6]
+; CHECK-NEXT: mov z0.b, z0.b[4]
+; CHECK-NEXT: zip1 z2.b, z3.b, z2.b
+; CHECK-NEXT: zip1 z1.b, z1.b, z4.b
+; CHECK-NEXT: zip1 z3.b, z6.b, z5.b
+; CHECK-NEXT: zip1 z0.b, z0.b, z0.b
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%op1 = load <8 x i8>, ptr %a
%op2 = load <8 x i8>, ptr %b
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
index 617b560..478072d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
@@ -184,13 +184,11 @@ define <32 x i8> @vls_sve_and_32xi8(<32 x i8> %ap) nounwind {
define <2 x i16> @vls_sve_and_2xi16(<2 x i16> %b) nounwind {
; CHECK-LABEL: vls_sve_and_2xi16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: fmov s1, wzr
; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: stp wzr, w8, [sp, #8]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: vls_sve_and_2xi16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
index b9264ad..6644be1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
@@ -91,19 +91,12 @@ define void @bitcast_v32i8(ptr %a, ptr %b) {
define void @bitcast_v2i16(ptr %a, ptr %b) {
; CHECK-LABEL: bitcast_v2i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: str w8, [x1]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: bitcast_v2i16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
index b8a2e0e..9729a1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
@@ -222,3 +222,255 @@ define void @build_vector_no_stride_v4f64(ptr %a) {
store <4 x double> <double 0.0, double 4.0, double 1.0, double 8.0>, ptr %a, align 8
ret void
}
+
+define void @build_vector_non_const_v4i1(i1 %a, i1 %b, i1 %c, i1 %d, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, w1, lsl #1
+; CHECK-NEXT: orr w8, w8, w2, lsl #2
+; CHECK-NEXT: orr w8, w8, w3, lsl #3
+; CHECK-NEXT: strb w8, [x4]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v4i1:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: orr w8, w0, w1, lsl #1
+; NONEON-NOSVE-NEXT: orr w8, w8, w2, lsl #2
+; NONEON-NOSVE-NEXT: orr w8, w8, w3, lsl #3
+; NONEON-NOSVE-NEXT: strb w8, [x4]
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <4 x i1> undef, i1 %a, i64 0
+ %2 = insertelement <4 x i1> %1, i1 %b, i64 1
+ %3 = insertelement <4 x i1> %2, i1 %c, i64 2
+ %4 = insertelement <4 x i1> %3, i1 %d, i64 3
+ store <4 x i1> %4, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v2f64(double %a, double %b, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v2f64:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: stp d0, d1, [sp, #-16]!
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: ldr q0, [sp]
+; NONEON-NOSVE-NEXT: str q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <2 x double> undef, double %a, i64 0
+ %2 = insertelement <2 x double> %1, double %b, i64 1
+ store <2 x double> %2, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v2f32(float %a, float %b, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT: // kill: def $s1 killed $s1 def $z1
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v2f32:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: sub sp, sp, #16
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: stp s0, s1, [sp, #8]
+; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
+; NONEON-NOSVE-NEXT: str d0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <2 x float> undef, float %a, i64 0
+ %2 = insertelement <2 x float> %1, float %b, i64 1
+ store <2 x float> %2, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v4f32(float %a, float %b, float %c, float %d, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $s2 killed $s2 def $z2
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT: // kill: def $s3 killed $s3 def $z3
+; CHECK-NEXT: // kill: def $s1 killed $s1 def $z1
+; CHECK-NEXT: zip1 z2.s, z2.s, z3.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: zip1 z0.d, z0.d, z2.d
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v4f32:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: sub sp, sp, #16
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: stp s2, s3, [sp, #8]
+; NONEON-NOSVE-NEXT: stp s0, s1, [sp]
+; NONEON-NOSVE-NEXT: ldr q0, [sp]
+; NONEON-NOSVE-NEXT: str q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <4 x float> undef, float %a, i64 0
+ %2 = insertelement <4 x float> %1, float %b, i64 1
+ %3 = insertelement <4 x float> %2, float %c, i64 2
+ %4 = insertelement <4 x float> %3, float %d, i64 3
+ store <4 x float> %4, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v4f64(double %a, double %b, double %c, double %d, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: // kill: def $d3 killed $d3 def $z3
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT: zip1 z2.d, z2.d, z3.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: stp q0, q2, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v4f64:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: stp d0, d1, [sp, #-32]!
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 32
+; NONEON-NOSVE-NEXT: stp d2, d3, [sp, #16]
+; NONEON-NOSVE-NEXT: ldp q1, q0, [sp]
+; NONEON-NOSVE-NEXT: stp q1, q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #32
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <4 x double> undef, double %a, i64 0
+ %2 = insertelement <4 x double> %1, double %b, i64 1
+ %3 = insertelement <4 x double> %2, double %c, i64 2
+ %4 = insertelement <4 x double> %3, double %d, i64 3
+ store <4 x double> %4, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v8f16(half %a, half %b, half %c, half %d, half %e, half %f, half %g, half %h, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $h6 killed $h6 def $z6
+; CHECK-NEXT: // kill: def $h4 killed $h4 def $z4
+; CHECK-NEXT: // kill: def $h2 killed $h2 def $z2
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT: // kill: def $h7 killed $h7 def $z7
+; CHECK-NEXT: // kill: def $h5 killed $h5 def $z5
+; CHECK-NEXT: // kill: def $h3 killed $h3 def $z3
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $z1
+; CHECK-NEXT: zip1 z6.h, z6.h, z7.h
+; CHECK-NEXT: zip1 z4.h, z4.h, z5.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z3.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
+; CHECK-NEXT: zip1 z1.s, z4.s, z6.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z2.s
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v8f16:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: sub sp, sp, #16
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: str h7, [sp, #14]
+; NONEON-NOSVE-NEXT: str h6, [sp, #12]
+; NONEON-NOSVE-NEXT: str h5, [sp, #10]
+; NONEON-NOSVE-NEXT: str h4, [sp, #8]
+; NONEON-NOSVE-NEXT: str h3, [sp, #6]
+; NONEON-NOSVE-NEXT: str h2, [sp, #4]
+; NONEON-NOSVE-NEXT: str h1, [sp, #2]
+; NONEON-NOSVE-NEXT: str h0, [sp]
+; NONEON-NOSVE-NEXT: ldr q0, [sp]
+; NONEON-NOSVE-NEXT: str q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <8 x half> undef, half %a, i64 0
+ %2 = insertelement <8 x half> %1, half %b, i64 1
+ %3 = insertelement <8 x half> %2, half %c, i64 2
+ %4 = insertelement <8 x half> %3, half %d, i64 3
+ %5 = insertelement <8 x half> %4, half %e, i64 4
+ %6 = insertelement <8 x half> %5, half %f, i64 5
+ %7 = insertelement <8 x half> %6, half %g, i64 6
+ %8 = insertelement <8 x half> %7, half %h, i64 7
+ store <8 x half> %8, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v2i32(i32 %a, i32 %b, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov s0, w1
+; CHECK-NEXT: fmov s1, w0
+; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
+; CHECK-NEXT: str d0, [x2]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v2i32:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: sub sp, sp, #16
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: stp w0, w1, [sp, #8]
+; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
+; NONEON-NOSVE-NEXT: str d0, [x2]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <2 x i32> undef, i32 %a, i64 0
+ %2 = insertelement <2 x i32> %1, i32 %b, i64 1
+ store <2 x i32> %2, ptr %out
+ ret void
+}
+
+define void @build_vector_non_const_v8i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8 %h, ptr %out) {
+; CHECK-LABEL: build_vector_non_const_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: strb w7, [sp, #15]
+; CHECK-NEXT: ldr x8, [sp, #16]
+; CHECK-NEXT: strb w6, [sp, #14]
+; CHECK-NEXT: strb w5, [sp, #13]
+; CHECK-NEXT: strb w4, [sp, #12]
+; CHECK-NEXT: strb w3, [sp, #11]
+; CHECK-NEXT: strb w2, [sp, #10]
+; CHECK-NEXT: strb w1, [sp, #9]
+; CHECK-NEXT: strb w0, [sp, #8]
+; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: str d0, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: build_vector_non_const_v8i8:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: sub sp, sp, #16
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
+; NONEON-NOSVE-NEXT: strb w7, [sp, #15]
+; NONEON-NOSVE-NEXT: ldr x8, [sp, #16]
+; NONEON-NOSVE-NEXT: strb w6, [sp, #14]
+; NONEON-NOSVE-NEXT: strb w5, [sp, #13]
+; NONEON-NOSVE-NEXT: strb w4, [sp, #12]
+; NONEON-NOSVE-NEXT: strb w3, [sp, #11]
+; NONEON-NOSVE-NEXT: strb w2, [sp, #10]
+; NONEON-NOSVE-NEXT: strb w1, [sp, #9]
+; NONEON-NOSVE-NEXT: strb w0, [sp, #8]
+; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
+; NONEON-NOSVE-NEXT: str d0, [x8]
+; NONEON-NOSVE-NEXT: add sp, sp, #16
+; NONEON-NOSVE-NEXT: ret
+ %1 = insertelement <8 x i8> undef, i8 %a, i64 0
+ %2 = insertelement <8 x i8> %1, i8 %b, i64 1
+ %3 = insertelement <8 x i8> %2, i8 %c, i64 2
+ %4 = insertelement <8 x i8> %3, i8 %d, i64 3
+ %5 = insertelement <8 x i8> %4, i8 %e, i64 4
+ %6 = insertelement <8 x i8> %5, i8 %f, i64 5
+ %7 = insertelement <8 x i8> %6, i8 %g, i64 6
+ %8 = insertelement <8 x i8> %7, i8 %h, i64 7
+ store <8 x i8> %8, ptr %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
index 4b6285b..c1810c6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
@@ -12,34 +12,22 @@ target triple = "aarch64-unknown-linux-gnu"
define <8 x i8> @concat_v8i8(<4 x i8> %op1, <4 x i8> %op2) {
; CHECK-LABEL: concat_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT: mov z2.h, z1.h[3]
-; CHECK-NEXT: fmov w8, s1
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: mov z2.h, z1.h[3]
; CHECK-NEXT: mov z3.h, z1.h[2]
-; CHECK-NEXT: mov z1.h, z1.h[1]
-; CHECK-NEXT: mov z4.h, z0.h[3]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strb w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.h, z0.h[2]
-; CHECK-NEXT: mov z0.h, z0.h[1]
-; CHECK-NEXT: strb w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strb w9, [sp, #14]
-; CHECK-NEXT: strb w8, [sp, #13]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strb w8, [sp, #11]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strb w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strb w8, [sp, #9]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: mov z4.h, z1.h[1]
+; CHECK-NEXT: mov z5.h, z0.h[3]
+; CHECK-NEXT: mov z6.h, z0.h[2]
+; CHECK-NEXT: mov z7.h, z0.h[1]
+; CHECK-NEXT: zip1 z2.b, z3.b, z2.b
+; CHECK-NEXT: zip1 z1.b, z1.b, z4.b
+; CHECK-NEXT: zip1 z3.b, z6.b, z5.b
+; CHECK-NEXT: zip1 z0.b, z0.b, z7.b
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: concat_v8i8:
@@ -152,22 +140,14 @@ define void @concat_v64i8(ptr %a, ptr %b, ptr %c) {
define <4 x i16> @concat_v4i16(<2 x i16> %op1, <2 x i16> %op2) {
; CHECK-LABEL: concat_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z2.s, z1.s[1]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s1
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: strh w9, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: mov z3.s, z0.s[1]
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: concat_v4i16:
@@ -428,18 +408,14 @@ define void @concat_v8i64(ptr %a, ptr %b, ptr %c) {
define <4 x half> @concat_v4f16(<2 x half> %op1, <2 x half> %op2) {
; CHECK-LABEL: concat_v4f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z2.h, z1.h[1]
-; CHECK-NEXT: str h1, [sp, #12]
-; CHECK-NEXT: mov z1.h, z0.h[1]
-; CHECK-NEXT: str h0, [sp, #8]
-; CHECK-NEXT: str h2, [sp, #14]
-; CHECK-NEXT: str h1, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: mov z3.h, z0.h[1]
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: concat_v4f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 50a05cb..7d6336a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -326,29 +326,29 @@ define <2 x i256> @load_sext_v2i64i256(ptr %ap) {
; CHECK-LABEL: load_sext_v2i64i256:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
-; CHECK-NEXT: fmov x8, d0
; CHECK-NEXT: mov z1.d, z0.d[1]
-; CHECK-NEXT: asr x9, x8, #63
-; CHECK-NEXT: fmov x10, d1
-; CHECK-NEXT: stp x8, x9, [sp, #-32]!
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: asr x8, x10, #63
-; CHECK-NEXT: mov z0.d, x9
-; CHECK-NEXT: stp x10, x8, [sp, #16]
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: ldp q2, q4, [sp], #32
-; CHECK-NEXT: mov z3.d, z0.d[1]
-; CHECK-NEXT: mov z5.d, z1.d[1]
-; CHECK-NEXT: mov z6.d, z2.d[1]
-; CHECK-NEXT: fmov x2, d0
-; CHECK-NEXT: mov z0.d, z4.d[1]
-; CHECK-NEXT: fmov x6, d1
-; CHECK-NEXT: fmov x0, d2
-; CHECK-NEXT: fmov x4, d4
-; CHECK-NEXT: fmov x3, d3
-; CHECK-NEXT: fmov x7, d5
-; CHECK-NEXT: fmov x1, d6
-; CHECK-NEXT: fmov x5, d0
+; CHECK-NEXT: fmov x8, d0
+; CHECK-NEXT: fmov x9, d1
+; CHECK-NEXT: asr x8, x8, #63
+; CHECK-NEXT: fmov d3, x8
+; CHECK-NEXT: mov z2.d, x8
+; CHECK-NEXT: asr x9, x9, #63
+; CHECK-NEXT: fmov d4, x9
+; CHECK-NEXT: zip1 z0.d, z0.d, z3.d
+; CHECK-NEXT: mov z3.d, x9
+; CHECK-NEXT: fmov x2, d2
+; CHECK-NEXT: zip1 z1.d, z1.d, z4.d
+; CHECK-NEXT: mov z4.d, z2.d[1]
+; CHECK-NEXT: mov z5.d, z0.d[1]
+; CHECK-NEXT: mov z6.d, z3.d[1]
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: fmov x6, d3
+; CHECK-NEXT: mov z2.d, z1.d[1]
+; CHECK-NEXT: fmov x3, d4
+; CHECK-NEXT: fmov x1, d5
+; CHECK-NEXT: fmov x4, d1
+; CHECK-NEXT: fmov x7, d6
+; CHECK-NEXT: fmov x5, d2
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: load_sext_v2i64i256:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
index 2665696..a728cbe 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
@@ -10,23 +10,15 @@ target triple = "aarch64-unknown-linux-gnu"
define <4 x i1> @extract_subvector_v8i1(<8 x i1> %op) {
; CHECK-LABEL: extract_subvector_v8i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z1.b, z0.b[7]
; CHECK-NEXT: mov z2.b, z0.b[6]
; CHECK-NEXT: mov z3.b, z0.b[5]
; CHECK-NEXT: mov z0.b, z0.b[4]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: extract_subvector_v8i1:
@@ -53,23 +45,15 @@ define <4 x i1> @extract_subvector_v8i1(<8 x i1> %op) {
define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) {
; CHECK-LABEL: extract_subvector_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z1.b, z0.b[7]
; CHECK-NEXT: mov z2.b, z0.b[6]
; CHECK-NEXT: mov z3.b, z0.b[5]
; CHECK-NEXT: mov z0.b, z0.b[4]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: extract_subvector_v8i8:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index dad53b3..f1771a7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -1126,49 +1126,39 @@ define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) {
define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) {
; SVE-LABEL: test_copysign_v4f16_v4f64:
; SVE: // %bb.0:
-; SVE-NEXT: sub sp, sp, #16
-; SVE-NEXT: .cfi_def_cfa_offset 16
-; SVE-NEXT: ldp q1, q0, [x1]
-; SVE-NEXT: ldr d4, [x0]
-; SVE-NEXT: and z4.h, z4.h, #0x7fff
-; SVE-NEXT: mov z2.d, z0.d[1]
-; SVE-NEXT: mov z3.d, z1.d[1]
-; SVE-NEXT: fcvt h0, d0
+; SVE-NEXT: ldp q0, q1, [x1]
+; SVE-NEXT: mov z2.d, z1.d[1]
+; SVE-NEXT: mov z3.d, z0.d[1]
; SVE-NEXT: fcvt h1, d1
+; SVE-NEXT: fcvt h0, d0
; SVE-NEXT: fcvt h2, d2
; SVE-NEXT: fcvt h3, d3
-; SVE-NEXT: str h0, [sp, #12]
-; SVE-NEXT: str h1, [sp, #8]
-; SVE-NEXT: str h2, [sp, #14]
-; SVE-NEXT: str h3, [sp, #10]
-; SVE-NEXT: ldr d0, [sp, #8]
+; SVE-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE-NEXT: zip1 z0.h, z0.h, z3.h
+; SVE-NEXT: zip1 z0.s, z0.s, z1.s
+; SVE-NEXT: ldr d1, [x0]
+; SVE-NEXT: and z1.h, z1.h, #0x7fff
; SVE-NEXT: and z0.h, z0.h, #0x8000
-; SVE-NEXT: orr z0.d, z4.d, z0.d
+; SVE-NEXT: orr z0.d, z1.d, z0.d
; SVE-NEXT: str d0, [x0]
-; SVE-NEXT: add sp, sp, #16
; SVE-NEXT: ret
;
; SVE2-LABEL: test_copysign_v4f16_v4f64:
; SVE2: // %bb.0:
-; SVE2-NEXT: sub sp, sp, #16
-; SVE2-NEXT: .cfi_def_cfa_offset 16
-; SVE2-NEXT: ldp q2, q1, [x1]
-; SVE2-NEXT: mov z0.h, #32767 // =0x7fff
-; SVE2-NEXT: ldr d5, [x0]
-; SVE2-NEXT: mov z3.d, z1.d[1]
-; SVE2-NEXT: mov z4.d, z2.d[1]
+; SVE2-NEXT: ldp q0, q1, [x1]
+; SVE2-NEXT: mov z2.d, z1.d[1]
+; SVE2-NEXT: mov z3.d, z0.d[1]
; SVE2-NEXT: fcvt h1, d1
+; SVE2-NEXT: fcvt h0, d0
; SVE2-NEXT: fcvt h2, d2
; SVE2-NEXT: fcvt h3, d3
-; SVE2-NEXT: fcvt h4, d4
-; SVE2-NEXT: str h1, [sp, #12]
-; SVE2-NEXT: str h2, [sp, #8]
-; SVE2-NEXT: str h3, [sp, #14]
-; SVE2-NEXT: str h4, [sp, #10]
-; SVE2-NEXT: ldr d1, [sp, #8]
-; SVE2-NEXT: bsl z5.d, z5.d, z1.d, z0.d
-; SVE2-NEXT: str d5, [x0]
-; SVE2-NEXT: add sp, sp, #16
+; SVE2-NEXT: zip1 z1.h, z1.h, z2.h
+; SVE2-NEXT: zip1 z0.h, z0.h, z3.h
+; SVE2-NEXT: mov z2.h, #32767 // =0x7fff
+; SVE2-NEXT: zip1 z0.s, z0.s, z1.s
+; SVE2-NEXT: ldr d1, [x0]
+; SVE2-NEXT: bsl z1.d, z1.d, z0.d, z2.d
+; SVE2-NEXT: str d1, [x0]
; SVE2-NEXT: ret
;
; NONEON-NOSVE-LABEL: test_copysign_v4f16_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
index a206fbc..11fee26 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
@@ -443,9 +443,10 @@ define <2 x i64> @fcvtzu_v2f16_v2i64(<2 x half> %op1) {
; CHECK-NEXT: mov z1.h, z0.h[1]
; CHECK-NEXT: fcvtzu x8, h0
; CHECK-NEXT: fcvtzu x9, h1
-; CHECK-NEXT: stp x8, x9, [sp, #-16]!
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v2f16_v2i64:
@@ -471,19 +472,20 @@ define void @fcvtzu_v4f16_v4i64(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzu_v4f16_v4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
-; CHECK-NEXT: mov z1.h, z0.h[1]
-; CHECK-NEXT: fcvtzu x8, h0
-; CHECK-NEXT: mov z2.h, z0.h[3]
-; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: fcvtzu x9, h1
-; CHECK-NEXT: fcvtzu x10, h2
-; CHECK-NEXT: fcvtzu x11, h0
-; CHECK-NEXT: stp x8, x9, [sp, #-32]!
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: stp x11, x10, [sp, #16]
-; CHECK-NEXT: ldp q1, q0, [sp]
+; CHECK-NEXT: mov z1.h, z0.h[3]
+; CHECK-NEXT: mov z2.h, z0.h[2]
+; CHECK-NEXT: mov z3.h, z0.h[1]
+; CHECK-NEXT: fcvtzu x10, h0
+; CHECK-NEXT: fcvtzu x8, h1
+; CHECK-NEXT: fcvtzu x9, h2
+; CHECK-NEXT: fcvtzu x11, h3
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: fmov d1, x11
+; CHECK-NEXT: zip1 z1.d, z2.d, z1.d
; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v4f16_v4i64:
@@ -521,31 +523,35 @@ define void @fcvtzu_v8f16_v8i64(ptr %a, ptr %b) {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z1.d, z0.d
-; CHECK-NEXT: fcvtzu x12, h0
+; CHECK-NEXT: mov z2.h, z0.h[3]
+; CHECK-NEXT: mov z3.h, z0.h[2]
+; CHECK-NEXT: mov z4.h, z0.h[1]
+; CHECK-NEXT: fcvtzu x10, h0
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
-; CHECK-NEXT: mov z2.h, z1.h[1]
-; CHECK-NEXT: fcvtzu x8, h1
-; CHECK-NEXT: mov z3.h, z1.h[3]
-; CHECK-NEXT: mov z1.h, z1.h[2]
-; CHECK-NEXT: fcvtzu x9, h2
-; CHECK-NEXT: mov z2.h, z0.h[1]
-; CHECK-NEXT: fcvtzu x10, h3
-; CHECK-NEXT: mov z3.h, z0.h[3]
-; CHECK-NEXT: fcvtzu x11, h1
-; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: stp x8, x9, [sp, #-64]!
-; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: fcvtzu x8, h2
; CHECK-NEXT: fcvtzu x9, h3
-; CHECK-NEXT: stp x11, x10, [sp, #16]
-; CHECK-NEXT: fcvtzu x10, h0
-; CHECK-NEXT: ldp q2, q3, [sp]
-; CHECK-NEXT: stp x12, x8, [sp, #32]
-; CHECK-NEXT: stp x10, x9, [sp, #48]
-; CHECK-NEXT: ldp q1, q0, [sp, #32]
-; CHECK-NEXT: stp q2, q3, [x1, #32]
-; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: fcvtzu x11, h4
+; CHECK-NEXT: mov z5.h, z1.h[3]
+; CHECK-NEXT: mov z6.h, z1.h[2]
+; CHECK-NEXT: mov z2.h, z1.h[1]
+; CHECK-NEXT: fcvtzu x14, h1
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: fmov d3, x11
+; CHECK-NEXT: fcvtzu x12, h5
+; CHECK-NEXT: fcvtzu x13, h6
+; CHECK-NEXT: fcvtzu x15, h2
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: fmov d1, x12
+; CHECK-NEXT: fmov d4, x13
+; CHECK-NEXT: zip1 z2.d, z2.d, z3.d
+; CHECK-NEXT: fmov d3, x14
+; CHECK-NEXT: zip1 z1.d, z4.d, z1.d
+; CHECK-NEXT: fmov d4, x15
+; CHECK-NEXT: stp q2, q0, [x1]
+; CHECK-NEXT: zip1 z3.d, z3.d, z4.d
+; CHECK-NEXT: stp q3, q1, [x1, #32]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v8f16_v8i64:
@@ -598,57 +604,67 @@ define void @fcvtzu_v8f16_v8i64(ptr %a, ptr %b) {
define void @fcvtzu_v16f16_v16i64(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzu_v16f16_v16i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldp q1, q0, [x0]
-; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: ext z2.b, z2.b, z1.b, #8
+; CHECK-NEXT: mov z5.d, z1.d
+; CHECK-NEXT: mov z2.h, z0.h[3]
+; CHECK-NEXT: mov z4.h, z1.h[1]
+; CHECK-NEXT: mov z6.h, z1.h[3]
+; CHECK-NEXT: fcvtzu x9, h1
+; CHECK-NEXT: fcvtzu x8, h0
+; CHECK-NEXT: mov z7.h, z0.h[1]
; CHECK-NEXT: ext z3.b, z3.b, z0.b, #8
-; CHECK-NEXT: mov z4.h, z2.h[1]
-; CHECK-NEXT: fcvtzu x8, h2
-; CHECK-NEXT: mov z5.h, z2.h[3]
-; CHECK-NEXT: mov z2.h, z2.h[2]
-; CHECK-NEXT: fcvtzu x12, h3
-; CHECK-NEXT: fcvtzu x9, h4
-; CHECK-NEXT: mov z4.h, z3.h[1]
-; CHECK-NEXT: fcvtzu x10, h5
-; CHECK-NEXT: mov z5.h, z3.h[3]
-; CHECK-NEXT: fcvtzu x11, h2
-; CHECK-NEXT: mov z2.h, z3.h[2]
-; CHECK-NEXT: stp x8, x9, [sp, #-128]!
-; CHECK-NEXT: .cfi_def_cfa_offset 128
-; CHECK-NEXT: fcvtzu x8, h4
-; CHECK-NEXT: fcvtzu x9, h5
-; CHECK-NEXT: stp x11, x10, [sp, #16]
+; CHECK-NEXT: ext z5.b, z5.b, z1.b, #8
; CHECK-NEXT: fcvtzu x10, h2
-; CHECK-NEXT: mov z3.h, z1.h[1]
-; CHECK-NEXT: mov z4.h, z1.h[3]
-; CHECK-NEXT: fcvtzu x11, h1
+; CHECK-NEXT: fcvtzu x11, h4
+; CHECK-NEXT: fcvtzu x12, h6
; CHECK-NEXT: mov z1.h, z1.h[2]
-; CHECK-NEXT: mov z2.h, z0.h[1]
-; CHECK-NEXT: stp x12, x8, [sp, #64]
-; CHECK-NEXT: fcvtzu x12, h3
-; CHECK-NEXT: fcvtzu x8, h4
-; CHECK-NEXT: stp x10, x9, [sp, #80]
-; CHECK-NEXT: fcvtzu x9, h1
-; CHECK-NEXT: mov z3.h, z0.h[3]
-; CHECK-NEXT: fcvtzu x10, h0
; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: stp x11, x12, [sp, #32]
-; CHECK-NEXT: fcvtzu x11, h2
-; CHECK-NEXT: fcvtzu x12, h3
-; CHECK-NEXT: stp x9, x8, [sp, #48]
-; CHECK-NEXT: fcvtzu x8, h0
-; CHECK-NEXT: ldp q0, q1, [sp]
-; CHECK-NEXT: ldp q3, q4, [sp, #64]
-; CHECK-NEXT: stp x10, x11, [sp, #96]
-; CHECK-NEXT: ldp q6, q7, [sp, #32]
-; CHECK-NEXT: stp x8, x12, [sp, #112]
-; CHECK-NEXT: ldp q5, q2, [sp, #96]
-; CHECK-NEXT: stp q0, q1, [x1, #32]
-; CHECK-NEXT: stp q6, q7, [x1]
-; CHECK-NEXT: stp q3, q4, [x1, #96]
-; CHECK-NEXT: stp q5, q2, [x1, #64]
-; CHECK-NEXT: add sp, sp, #128
+; CHECK-NEXT: fmov d16, x9
+; CHECK-NEXT: mov z2.h, z3.h[3]
+; CHECK-NEXT: mov z4.h, z5.h[3]
+; CHECK-NEXT: fcvtzu x14, h3
+; CHECK-NEXT: fcvtzu x13, h1
+; CHECK-NEXT: fcvtzu x15, h5
+; CHECK-NEXT: mov z1.h, z3.h[1]
+; CHECK-NEXT: mov z6.h, z5.h[1]
+; CHECK-NEXT: mov z5.h, z5.h[2]
+; CHECK-NEXT: mov z3.h, z3.h[2]
+; CHECK-NEXT: fcvtzu x9, h2
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: fcvtzu x10, h4
+; CHECK-NEXT: fmov d4, x11
+; CHECK-NEXT: fcvtzu x11, h7
+; CHECK-NEXT: fmov d7, x12
+; CHECK-NEXT: fcvtzu x12, h0
+; CHECK-NEXT: fmov d0, x13
+; CHECK-NEXT: fcvtzu x13, h1
+; CHECK-NEXT: fmov d1, x14
+; CHECK-NEXT: fcvtzu x14, h6
+; CHECK-NEXT: fmov d6, x15
+; CHECK-NEXT: fcvtzu x15, h5
+; CHECK-NEXT: fmov d5, x9
+; CHECK-NEXT: fcvtzu x9, h3
+; CHECK-NEXT: zip1 z4.d, z16.d, z4.d
+; CHECK-NEXT: fmov d16, x8
+; CHECK-NEXT: zip1 z0.d, z0.d, z7.d
+; CHECK-NEXT: fmov d3, x12
+; CHECK-NEXT: fmov d7, x10
+; CHECK-NEXT: stp q4, q0, [x1, #64]
+; CHECK-NEXT: fmov d0, x14
+; CHECK-NEXT: fmov d4, x9
+; CHECK-NEXT: zip1 z2.d, z3.d, z2.d
+; CHECK-NEXT: fmov d3, x11
+; CHECK-NEXT: zip1 z0.d, z6.d, z0.d
+; CHECK-NEXT: zip1 z4.d, z4.d, z5.d
+; CHECK-NEXT: zip1 z3.d, z16.d, z3.d
+; CHECK-NEXT: fmov d16, x15
+; CHECK-NEXT: stp q3, q2, [x1]
+; CHECK-NEXT: fmov d2, x13
+; CHECK-NEXT: zip1 z7.d, z16.d, z7.d
+; CHECK-NEXT: zip1 z1.d, z1.d, z2.d
+; CHECK-NEXT: stp q0, q7, [x1, #96]
+; CHECK-NEXT: stp q1, q4, [x1, #32]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v16f16_v16i64:
@@ -1216,26 +1232,18 @@ define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) {
define <4 x i16> @fcvtzu_v4f64_v4i16(ptr %a) {
; CHECK-LABEL: fcvtzu_v4f64_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldp q1, q0, [x0]
+; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT: mov z2.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z1.s[1]
-; CHECK-NEXT: fmov w9, s1
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: mov z2.s, z1.s[1]
+; CHECK-NEXT: mov z3.s, z0.s[1]
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v4f64_v4i16:
@@ -1270,40 +1278,29 @@ define <4 x i16> @fcvtzu_v4f64_v4i16(ptr %a) {
define <8 x i16> @fcvtzu_v8f64_v8i16(ptr %a) {
; CHECK-LABEL: fcvtzu_v8f64_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldp q1, q0, [x0, #32]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ldp q3, q2, [x0]
+; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.s, z1.s[1]
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.s, z3.s[1]
-; CHECK-NEXT: strh w8, [sp]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT: mov z4.s, z0.s[1]
+; CHECK-NEXT: mov z5.s, z1.s[1]
+; CHECK-NEXT: mov z6.s, z3.s[1]
+; CHECK-NEXT: mov z7.s, z2.s[1]
+; CHECK-NEXT: zip1 z0.h, z0.h, z4.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z5.h
+; CHECK-NEXT: zip1 z3.h, z3.h, z6.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z7.h
+; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
+; CHECK-NEXT: zip1 z1.s, z2.s, z3.s
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v8f64_v8i16:
@@ -1360,73 +1357,50 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(ptr %a) {
define void @fcvtzu_v16f64_v16i16(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzu_v16f64_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: ldp q0, q1, [x0, #32]
+; CHECK-NEXT: ldp q5, q6, [x0, #96]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ldp q3, q2, [x0]
-; CHECK-NEXT: ldp q4, q5, [x0, #96]
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT: ldp q0, q4, [x0, #32]
+; CHECK-NEXT: ldp q2, q7, [x0, #64]
+; CHECK-NEXT: ldp q1, q3, [x0]
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.d
; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
-; CHECK-NEXT: ldp q6, q7, [x0, #64]
; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT: uzp1 z6.s, z6.s, z6.s
+; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s
; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s
-; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z16.s, z1.s[1]
-; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov z0.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z3.s[1]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: movprfx z3, z7
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z7.d
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: uzp1 z1.s, z4.s, z4.s
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: uzp1 z0.s, z3.s, z3.s
-; CHECK-NEXT: mov z3.s, z5.s[1]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: uzp1 z2.s, z6.s, z6.s
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: strh w8, [sp, #28]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.s, z1.s[1]
-; CHECK-NEXT: strh w8, [sp, #24]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #16]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #18]
-; CHECK-NEXT: ldp q1, q0, [sp]
-; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT: mov z17.s, z6.s[1]
+; CHECK-NEXT: mov z16.s, z4.s[1]
+; CHECK-NEXT: mov z18.s, z5.s[1]
+; CHECK-NEXT: mov z21.s, z0.s[1]
+; CHECK-NEXT: mov z19.s, z7.s[1]
+; CHECK-NEXT: mov z20.s, z2.s[1]
+; CHECK-NEXT: mov z22.s, z3.s[1]
+; CHECK-NEXT: mov z23.s, z1.s[1]
+; CHECK-NEXT: zip1 z6.h, z6.h, z17.h
+; CHECK-NEXT: zip1 z4.h, z4.h, z16.h
+; CHECK-NEXT: zip1 z5.h, z5.h, z18.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z21.h
+; CHECK-NEXT: zip1 z7.h, z7.h, z19.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z20.h
+; CHECK-NEXT: zip1 z3.h, z3.h, z22.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z23.h
+; CHECK-NEXT: zip1 z5.s, z5.s, z6.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z4.s
+; CHECK-NEXT: zip1 z2.s, z2.s, z7.s
+; CHECK-NEXT: zip1 z1.s, z1.s, z3.s
+; CHECK-NEXT: zip1 z2.d, z2.d, z5.d
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: stp q0, q2, [x1]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzu_v16f64_v16i16:
@@ -2187,9 +2161,10 @@ define <2 x i64> @fcvtzs_v2f16_v2i64(<2 x half> %op1) {
; CHECK-NEXT: mov z1.h, z0.h[1]
; CHECK-NEXT: fcvtzs x8, h0
; CHECK-NEXT: fcvtzs x9, h1
-; CHECK-NEXT: stp x8, x9, [sp, #-16]!
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v2f16_v2i64:
@@ -2215,19 +2190,20 @@ define void @fcvtzs_v4f16_v4i64(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzs_v4f16_v4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
-; CHECK-NEXT: mov z1.h, z0.h[1]
-; CHECK-NEXT: fcvtzs x8, h0
-; CHECK-NEXT: mov z2.h, z0.h[3]
-; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: fcvtzs x9, h1
-; CHECK-NEXT: fcvtzs x10, h2
-; CHECK-NEXT: fcvtzs x11, h0
-; CHECK-NEXT: stp x8, x9, [sp, #-32]!
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: stp x11, x10, [sp, #16]
-; CHECK-NEXT: ldp q1, q0, [sp]
+; CHECK-NEXT: mov z1.h, z0.h[3]
+; CHECK-NEXT: mov z2.h, z0.h[2]
+; CHECK-NEXT: mov z3.h, z0.h[1]
+; CHECK-NEXT: fcvtzs x10, h0
+; CHECK-NEXT: fcvtzs x8, h1
+; CHECK-NEXT: fcvtzs x9, h2
+; CHECK-NEXT: fcvtzs x11, h3
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: fmov d1, x11
+; CHECK-NEXT: zip1 z1.d, z2.d, z1.d
; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v4f16_v4i64:
@@ -2265,31 +2241,35 @@ define void @fcvtzs_v8f16_v8i64(ptr %a, ptr %b) {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z1.d, z0.d
-; CHECK-NEXT: fcvtzs x12, h0
+; CHECK-NEXT: mov z2.h, z0.h[3]
+; CHECK-NEXT: mov z3.h, z0.h[2]
+; CHECK-NEXT: mov z4.h, z0.h[1]
+; CHECK-NEXT: fcvtzs x10, h0
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
-; CHECK-NEXT: mov z2.h, z1.h[1]
-; CHECK-NEXT: fcvtzs x8, h1
-; CHECK-NEXT: mov z3.h, z1.h[3]
-; CHECK-NEXT: mov z1.h, z1.h[2]
-; CHECK-NEXT: fcvtzs x9, h2
-; CHECK-NEXT: mov z2.h, z0.h[1]
-; CHECK-NEXT: fcvtzs x10, h3
-; CHECK-NEXT: mov z3.h, z0.h[3]
-; CHECK-NEXT: fcvtzs x11, h1
-; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: stp x8, x9, [sp, #-64]!
-; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: fcvtzs x8, h2
; CHECK-NEXT: fcvtzs x9, h3
-; CHECK-NEXT: stp x11, x10, [sp, #16]
-; CHECK-NEXT: fcvtzs x10, h0
-; CHECK-NEXT: ldp q2, q3, [sp]
-; CHECK-NEXT: stp x12, x8, [sp, #32]
-; CHECK-NEXT: stp x10, x9, [sp, #48]
-; CHECK-NEXT: ldp q1, q0, [sp, #32]
-; CHECK-NEXT: stp q2, q3, [x1, #32]
-; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: fcvtzs x11, h4
+; CHECK-NEXT: mov z5.h, z1.h[3]
+; CHECK-NEXT: mov z6.h, z1.h[2]
+; CHECK-NEXT: mov z2.h, z1.h[1]
+; CHECK-NEXT: fcvtzs x14, h1
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: fmov d1, x9
+; CHECK-NEXT: fmov d3, x11
+; CHECK-NEXT: fcvtzs x12, h5
+; CHECK-NEXT: fcvtzs x13, h6
+; CHECK-NEXT: fcvtzs x15, h2
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: fmov d1, x12
+; CHECK-NEXT: fmov d4, x13
+; CHECK-NEXT: zip1 z2.d, z2.d, z3.d
+; CHECK-NEXT: fmov d3, x14
+; CHECK-NEXT: zip1 z1.d, z4.d, z1.d
+; CHECK-NEXT: fmov d4, x15
+; CHECK-NEXT: stp q2, q0, [x1]
+; CHECK-NEXT: zip1 z3.d, z3.d, z4.d
+; CHECK-NEXT: stp q3, q1, [x1, #32]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v8f16_v8i64:
@@ -2342,57 +2322,67 @@ define void @fcvtzs_v8f16_v8i64(ptr %a, ptr %b) {
define void @fcvtzs_v16f16_v16i64(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzs_v16f16_v16i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldp q1, q0, [x0]
-; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: ext z2.b, z2.b, z1.b, #8
+; CHECK-NEXT: mov z5.d, z1.d
+; CHECK-NEXT: mov z2.h, z0.h[3]
+; CHECK-NEXT: mov z4.h, z1.h[1]
+; CHECK-NEXT: mov z6.h, z1.h[3]
+; CHECK-NEXT: fcvtzs x9, h1
+; CHECK-NEXT: fcvtzs x8, h0
+; CHECK-NEXT: mov z7.h, z0.h[1]
; CHECK-NEXT: ext z3.b, z3.b, z0.b, #8
-; CHECK-NEXT: mov z4.h, z2.h[1]
-; CHECK-NEXT: fcvtzs x8, h2
-; CHECK-NEXT: mov z5.h, z2.h[3]
-; CHECK-NEXT: mov z2.h, z2.h[2]
-; CHECK-NEXT: fcvtzs x12, h3
-; CHECK-NEXT: fcvtzs x9, h4
-; CHECK-NEXT: mov z4.h, z3.h[1]
-; CHECK-NEXT: fcvtzs x10, h5
-; CHECK-NEXT: mov z5.h, z3.h[3]
-; CHECK-NEXT: fcvtzs x11, h2
-; CHECK-NEXT: mov z2.h, z3.h[2]
-; CHECK-NEXT: stp x8, x9, [sp, #-128]!
-; CHECK-NEXT: .cfi_def_cfa_offset 128
-; CHECK-NEXT: fcvtzs x8, h4
-; CHECK-NEXT: fcvtzs x9, h5
-; CHECK-NEXT: stp x11, x10, [sp, #16]
+; CHECK-NEXT: ext z5.b, z5.b, z1.b, #8
; CHECK-NEXT: fcvtzs x10, h2
-; CHECK-NEXT: mov z3.h, z1.h[1]
-; CHECK-NEXT: mov z4.h, z1.h[3]
-; CHECK-NEXT: fcvtzs x11, h1
+; CHECK-NEXT: fcvtzs x11, h4
+; CHECK-NEXT: fcvtzs x12, h6
; CHECK-NEXT: mov z1.h, z1.h[2]
-; CHECK-NEXT: mov z2.h, z0.h[1]
-; CHECK-NEXT: stp x12, x8, [sp, #64]
-; CHECK-NEXT: fcvtzs x12, h3
-; CHECK-NEXT: fcvtzs x8, h4
-; CHECK-NEXT: stp x10, x9, [sp, #80]
-; CHECK-NEXT: fcvtzs x9, h1
-; CHECK-NEXT: mov z3.h, z0.h[3]
-; CHECK-NEXT: fcvtzs x10, h0
; CHECK-NEXT: mov z0.h, z0.h[2]
-; CHECK-NEXT: stp x11, x12, [sp, #32]
-; CHECK-NEXT: fcvtzs x11, h2
-; CHECK-NEXT: fcvtzs x12, h3
-; CHECK-NEXT: stp x9, x8, [sp, #48]
-; CHECK-NEXT: fcvtzs x8, h0
-; CHECK-NEXT: ldp q0, q1, [sp]
-; CHECK-NEXT: ldp q3, q4, [sp, #64]
-; CHECK-NEXT: stp x10, x11, [sp, #96]
-; CHECK-NEXT: ldp q6, q7, [sp, #32]
-; CHECK-NEXT: stp x8, x12, [sp, #112]
-; CHECK-NEXT: ldp q5, q2, [sp, #96]
-; CHECK-NEXT: stp q0, q1, [x1, #32]
-; CHECK-NEXT: stp q6, q7, [x1]
-; CHECK-NEXT: stp q3, q4, [x1, #96]
-; CHECK-NEXT: stp q5, q2, [x1, #64]
-; CHECK-NEXT: add sp, sp, #128
+; CHECK-NEXT: fmov d16, x9
+; CHECK-NEXT: mov z2.h, z3.h[3]
+; CHECK-NEXT: mov z4.h, z5.h[3]
+; CHECK-NEXT: fcvtzs x14, h3
+; CHECK-NEXT: fcvtzs x13, h1
+; CHECK-NEXT: fcvtzs x15, h5
+; CHECK-NEXT: mov z1.h, z3.h[1]
+; CHECK-NEXT: mov z6.h, z5.h[1]
+; CHECK-NEXT: mov z5.h, z5.h[2]
+; CHECK-NEXT: mov z3.h, z3.h[2]
+; CHECK-NEXT: fcvtzs x9, h2
+; CHECK-NEXT: fmov d2, x10
+; CHECK-NEXT: fcvtzs x10, h4
+; CHECK-NEXT: fmov d4, x11
+; CHECK-NEXT: fcvtzs x11, h7
+; CHECK-NEXT: fmov d7, x12
+; CHECK-NEXT: fcvtzs x12, h0
+; CHECK-NEXT: fmov d0, x13
+; CHECK-NEXT: fcvtzs x13, h1
+; CHECK-NEXT: fmov d1, x14
+; CHECK-NEXT: fcvtzs x14, h6
+; CHECK-NEXT: fmov d6, x15
+; CHECK-NEXT: fcvtzs x15, h5
+; CHECK-NEXT: fmov d5, x9
+; CHECK-NEXT: fcvtzs x9, h3
+; CHECK-NEXT: zip1 z4.d, z16.d, z4.d
+; CHECK-NEXT: fmov d16, x8
+; CHECK-NEXT: zip1 z0.d, z0.d, z7.d
+; CHECK-NEXT: fmov d3, x12
+; CHECK-NEXT: fmov d7, x10
+; CHECK-NEXT: stp q4, q0, [x1, #64]
+; CHECK-NEXT: fmov d0, x14
+; CHECK-NEXT: fmov d4, x9
+; CHECK-NEXT: zip1 z2.d, z3.d, z2.d
+; CHECK-NEXT: fmov d3, x11
+; CHECK-NEXT: zip1 z0.d, z6.d, z0.d
+; CHECK-NEXT: zip1 z4.d, z4.d, z5.d
+; CHECK-NEXT: zip1 z3.d, z16.d, z3.d
+; CHECK-NEXT: fmov d16, x15
+; CHECK-NEXT: stp q3, q2, [x1]
+; CHECK-NEXT: fmov d2, x13
+; CHECK-NEXT: zip1 z7.d, z16.d, z7.d
+; CHECK-NEXT: zip1 z1.d, z1.d, z2.d
+; CHECK-NEXT: stp q0, q7, [x1, #96]
+; CHECK-NEXT: stp q1, q4, [x1, #32]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v16f16_v16i64:
@@ -2962,26 +2952,18 @@ define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) {
define <4 x i16> @fcvtzs_v4f64_v4i16(ptr %a) {
; CHECK-LABEL: fcvtzs_v4f64_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldp q1, q0, [x0]
+; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT: mov z2.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z1.s[1]
-; CHECK-NEXT: fmov w9, s1
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: mov z2.s, z1.s[1]
+; CHECK-NEXT: mov z3.s, z0.s[1]
+; CHECK-NEXT: zip1 z1.h, z1.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v4f64_v4i16:
@@ -3016,40 +2998,29 @@ define <4 x i16> @fcvtzs_v4f64_v4i16(ptr %a) {
define <8 x i16> @fcvtzs_v8f64_v8i16(ptr %a) {
; CHECK-LABEL: fcvtzs_v8f64_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldp q1, q0, [x0, #32]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ldp q3, q2, [x0]
+; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.s, z1.s[1]
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.s, z3.s[1]
-; CHECK-NEXT: strh w8, [sp]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT: mov z4.s, z0.s[1]
+; CHECK-NEXT: mov z5.s, z1.s[1]
+; CHECK-NEXT: mov z6.s, z3.s[1]
+; CHECK-NEXT: mov z7.s, z2.s[1]
+; CHECK-NEXT: zip1 z0.h, z0.h, z4.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z5.h
+; CHECK-NEXT: zip1 z3.h, z3.h, z6.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z7.h
+; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
+; CHECK-NEXT: zip1 z1.s, z2.s, z3.s
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v8f64_v8i16:
@@ -3106,73 +3077,50 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(ptr %a) {
define void @fcvtzs_v16f64_v16i16(ptr %a, ptr %b) {
; CHECK-LABEL: fcvtzs_v16f64_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: ldp q0, q1, [x0, #32]
+; CHECK-NEXT: ldp q5, q6, [x0, #96]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ldp q3, q2, [x0]
-; CHECK-NEXT: ldp q4, q5, [x0, #96]
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT: ldp q0, q4, [x0, #32]
+; CHECK-NEXT: ldp q2, q7, [x0, #64]
+; CHECK-NEXT: ldp q1, q3, [x0]
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.d
; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d
-; CHECK-NEXT: ldp q6, q7, [x0, #64]
; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT: uzp1 z6.s, z6.s, z6.s
+; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s
; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s
; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s
-; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z16.s, z1.s[1]
-; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov z0.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z3.s[1]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: movprfx z3, z7
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z7.d
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: uzp1 z1.s, z4.s, z4.s
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: uzp1 z0.s, z3.s, z3.s
-; CHECK-NEXT: mov z3.s, z5.s[1]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: uzp1 z2.s, z6.s, z6.s
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: strh w8, [sp, #28]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.s, z1.s[1]
-; CHECK-NEXT: strh w8, [sp, #24]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z2.s[1]
-; CHECK-NEXT: strh w8, [sp, #16]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #18]
-; CHECK-NEXT: ldp q1, q0, [sp]
-; CHECK-NEXT: stp q1, q0, [x1]
-; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT: mov z17.s, z6.s[1]
+; CHECK-NEXT: mov z16.s, z4.s[1]
+; CHECK-NEXT: mov z18.s, z5.s[1]
+; CHECK-NEXT: mov z21.s, z0.s[1]
+; CHECK-NEXT: mov z19.s, z7.s[1]
+; CHECK-NEXT: mov z20.s, z2.s[1]
+; CHECK-NEXT: mov z22.s, z3.s[1]
+; CHECK-NEXT: mov z23.s, z1.s[1]
+; CHECK-NEXT: zip1 z6.h, z6.h, z17.h
+; CHECK-NEXT: zip1 z4.h, z4.h, z16.h
+; CHECK-NEXT: zip1 z5.h, z5.h, z18.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z21.h
+; CHECK-NEXT: zip1 z7.h, z7.h, z19.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z20.h
+; CHECK-NEXT: zip1 z3.h, z3.h, z22.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z23.h
+; CHECK-NEXT: zip1 z5.s, z5.s, z6.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z4.s
+; CHECK-NEXT: zip1 z2.s, z2.s, z7.s
+; CHECK-NEXT: zip1 z1.s, z1.s, z3.s
+; CHECK-NEXT: zip1 z2.d, z2.d, z5.d
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: stp q0, q2, [x1]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fcvtzs_v16f64_v16i16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index 035c76b..ad5f91a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -8,25 +8,18 @@ target triple = "aarch64-unknown-linux-gnu"
define <2 x half> @select_v2f16(<2 x half> %op1, <2 x half> %op2, <2 x i1> %mask) {
; CHECK-LABEL: select_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2
; CHECK-NEXT: mov z3.s, z2.s[1]
-; CHECK-NEXT: fmov w8, s2
+; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d2, [sp, #8]
+; CHECK-NEXT: zip1 z2.h, z2.h, z3.h
; CHECK-NEXT: lsl z2.h, z2.h, #15
; CHECK-NEXT: asr z2.h, z2.h, #15
; CHECK-NEXT: and z2.h, z2.h, #0x1
; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, #0
; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: select_v2f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
index d77473e..275d13e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
@@ -506,14 +506,10 @@ define <4 x i64> @insertelement_v4i64(ptr %a) {
define <2 x half> @insertelement_v2f16(<2 x half> %op1) {
; CHECK-LABEL: insertelement_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: fmov h1, #5.00000000
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT: str h0, [sp, #8]
-; CHECK-NEXT: str h1, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: insertelement_v2f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index 0c712a1..e595686 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -1140,18 +1140,14 @@ define void @ucvtf_v8i32_v8f64(ptr %a, ptr %b) {
define <2 x half> @ucvtf_v2i64_v2f16(<2 x i64> %op1) {
; CHECK-LABEL: ucvtf_v2i64_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: mov z1.d, z0.d[1]
; CHECK-NEXT: fmov x8, d0
+; CHECK-NEXT: fmov x9, d1
; CHECK-NEXT: ucvtf h0, x8
-; CHECK-NEXT: fmov x8, d1
-; CHECK-NEXT: ucvtf h1, x8
-; CHECK-NEXT: str h0, [sp, #8]
-; CHECK-NEXT: str h1, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ucvtf h1, x9
+; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ucvtf_v2i64_v2f16:
@@ -2598,18 +2594,14 @@ define void @scvtf_v16i32_v16f64(ptr %a, ptr %b) {
define <2 x half> @scvtf_v2i64_v2f16(<2 x i64> %op1) {
; CHECK-LABEL: scvtf_v2i64_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: mov z1.d, z0.d[1]
; CHECK-NEXT: fmov x8, d0
+; CHECK-NEXT: fmov x9, d1
; CHECK-NEXT: scvtf h0, x8
-; CHECK-NEXT: fmov x8, d1
-; CHECK-NEXT: scvtf h1, x8
-; CHECK-NEXT: str h0, [sp, #8]
-; CHECK-NEXT: str h1, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: scvtf h1, x9
+; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: scvtf_v2i64_v2f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
index 270f05a..6135433 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -10,25 +10,20 @@ declare void @def(ptr)
define void @alloc_v4i8(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v4i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #48
-; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: add x0, sp, #28
-; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT: add x20, sp, #28
+; CHECK-NEXT: add x0, sp, #12
+; CHECK-NEXT: add x20, sp, #12
; CHECK-NEXT: bl def
; CHECK-NEXT: ptrue p0.b, vl2
; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x20]
; CHECK-NEXT: ptrue p0.s, vl2
-; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: mov z2.b, z0.b[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: stp w8, w9, [sp, #8]
-; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: zip1 z0.s, z0.s, z2.s
; CHECK-NEXT: st1b { z0.s }, p0, [x19]
-; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: alloc_v4i8:
@@ -62,32 +57,28 @@ define void @alloc_v4i8(ptr %st_ptr) nounwind {
define void @alloc_v6i8(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v6i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #48
-; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: add x0, sp, #24
+; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl def
-; CHECK-NEXT: ldr d0, [sp, #24]
+; CHECK-NEXT: ldr d0, [sp, #8]
; CHECK-NEXT: ptrue p0.h, vl4
+; CHECK-NEXT: add x8, sp, #4
; CHECK-NEXT: ptrue p1.s, vl2
; CHECK-NEXT: mov z1.b, z0.b[3]
-; CHECK-NEXT: mov z2.b, z0.b[5]
-; CHECK-NEXT: mov z0.b, z0.b[1]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: add x8, sp, #20
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: st1b { z0.h }, p0, [x8]
-; CHECK-NEXT: ld1h { z0.s }, p1/z, [x8]
-; CHECK-NEXT: strb w9, [x19, #2]
+; CHECK-NEXT: mov z2.b, z0.b[1]
+; CHECK-NEXT: mov z0.b, z0.b[5]
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z1.s, z1.s, z0.s
+; CHECK-NEXT: st1b { z1.h }, p0, [x8]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x8]
; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: strb w8, [x19, #2]
+; CHECK-NEXT: fmov w8, s1
; CHECK-NEXT: strh w8, [x19]
-; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: alloc_v6i8:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
index 5f4b9dd..9055b2e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
@@ -1466,23 +1466,18 @@ define <32 x i8> @masked_load_v32i8(ptr %src, <32 x i1> %mask) {
define <2 x half> @masked_load_v2f16(ptr %src, <2 x i1> %mask) {
; CHECK-LABEL: masked_load_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: str wzr, [sp, #12]
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: mov z2.s, z0.s[1]
; CHECK-NEXT: ptrue p0.h, vl4
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: zip1 z0.h, z0.h, z2.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z1.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
; CHECK-NEXT: lsl z0.h, z0.h, #15
; CHECK-NEXT: asr z0.h, z0.h, #15
; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_load_v2f16:
@@ -2318,33 +2313,21 @@ define <8 x float> @masked_load_v8f32(ptr %src, <8 x i1> %mask) {
; CHECK-LABEL: masked_load_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: mov z1.b, z0.b[3]
; CHECK-NEXT: mov z2.b, z0.b[2]
+; CHECK-NEXT: mov x8, #4 // =0x4
; CHECK-NEXT: mov z3.b, z0.b[1]
; CHECK-NEXT: mov z4.b, z0.b[7]
-; CHECK-NEXT: strh w8, [sp, #-16]!
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.b, z0.b[6]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.b, z0.b[5]
-; CHECK-NEXT: mov z0.b, z0.b[4]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w9, [sp, #4]
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w9, [sp, #14]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: mov x8, #4 // =0x4
-; CHECK-NEXT: ldp d0, d1, [sp]
+; CHECK-NEXT: mov z5.b, z0.b[6]
+; CHECK-NEXT: mov z6.b, z0.b[5]
+; CHECK-NEXT: mov z7.b, z0.b[4]
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z2.h, z5.h, z4.h
+; CHECK-NEXT: zip1 z3.h, z7.h, z6.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: zip1 z1.s, z3.s, z2.s
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: uunpklo z1.s, z1.h
; CHECK-NEXT: lsl z0.s, z0.s, #31
@@ -2357,7 +2340,6 @@ define <8 x float> @masked_load_v8f32(ptr %src, <8 x i1> %mask) {
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_load_v8f32:
@@ -2684,23 +2666,21 @@ define <4 x double> @masked_load_v4f64(ptr %src, <4 x i1> %mask) {
define <3 x i32> @masked_load_zext_v3i32(ptr %load_ptr, <3 x i1> %pm) {
; CHECK-LABEL: masked_load_zext_v3i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: strh w3, [sp, #12]
+; CHECK-NEXT: fmov s0, w2
+; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: adrp x8, .LCPI13_0
; CHECK-NEXT: ptrue p0.s, vl4
-; CHECK-NEXT: strh w2, [sp, #10]
-; CHECK-NEXT: ldr d0, [x8, :lo12:.LCPI13_0]
-; CHECK-NEXT: strh w1, [sp, #8]
-; CHECK-NEXT: ldr d1, [sp, #8]
-; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: zip1 z0.h, z1.h, z0.h
+; CHECK-NEXT: fmov s1, w3
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI13_0]
+; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: lsl z0.h, z0.h, #15
; CHECK-NEXT: asr z0.h, z0.h, #15
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_load_zext_v3i32:
@@ -2759,23 +2739,21 @@ define <3 x i32> @masked_load_zext_v3i32(ptr %load_ptr, <3 x i1> %pm) {
define <3 x i32> @masked_load_sext_v3i32(ptr %load_ptr, <3 x i1> %pm) {
; CHECK-LABEL: masked_load_sext_v3i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: strh w3, [sp, #12]
+; CHECK-NEXT: fmov s0, w2
+; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: adrp x8, .LCPI14_0
; CHECK-NEXT: ptrue p0.s, vl4
-; CHECK-NEXT: strh w2, [sp, #10]
-; CHECK-NEXT: ldr d0, [x8, :lo12:.LCPI14_0]
-; CHECK-NEXT: strh w1, [sp, #8]
-; CHECK-NEXT: ldr d1, [sp, #8]
-; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: zip1 z0.h, z1.h, z0.h
+; CHECK-NEXT: fmov s1, w3
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: lsl z0.h, z0.h, #15
; CHECK-NEXT: asr z0.h, z0.h, #15
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_load_sext_v3i32:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
index 0c3411e..265480b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
@@ -589,23 +589,18 @@ define void @masked_store_v32i8(ptr %dst, <32 x i1> %mask) {
define void @masked_store_v2f16(ptr %dst, <2 x i1> %mask) {
; CHECK-LABEL: masked_store_v2f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT: mov z1.s, z0.s[1]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: str wzr, [sp, #12]
+; CHECK-NEXT: fmov s1, wzr
+; CHECK-NEXT: mov z2.s, z0.s[1]
; CHECK-NEXT: ptrue p0.h, vl4
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: zip1 z0.h, z0.h, z2.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z1.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
; CHECK-NEXT: lsl z0.h, z0.h, #15
; CHECK-NEXT: asr z0.h, z0.h, #15
; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
; CHECK-NEXT: mov z0.h, #0 // =0x0
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_store_v2f16:
@@ -1014,48 +1009,33 @@ define void @masked_store_v4f32(ptr %dst, <4 x i1> %mask) {
define void @masked_store_v8f32(ptr %dst, <8 x i1> %mask) {
; CHECK-LABEL: masked_store_v8f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z1.b, z0.b[7]
; CHECK-NEXT: mov z2.b, z0.b[6]
+; CHECK-NEXT: mov x8, #4 // =0x4
; CHECK-NEXT: mov z3.b, z0.b[5]
; CHECK-NEXT: mov z4.b, z0.b[4]
+; CHECK-NEXT: mov z5.b, z0.b[3]
+; CHECK-NEXT: mov z6.b, z0.b[2]
+; CHECK-NEXT: mov z7.b, z0.b[1]
; CHECK-NEXT: ptrue p0.s, vl4
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.b, z0.b[3]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z0.b[2]
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: mov z4.b, z0.b[1]
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: mov x8, #4 // =0x4
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ldr d1, [sp, #8]
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z2.h, z4.h, z3.h
+; CHECK-NEXT: zip1 z3.h, z6.h, z5.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z7.h
+; CHECK-NEXT: zip1 z1.s, z2.s, z1.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z3.s
; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: lsl z0.s, z0.s, #31
; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: asr z0.s, z0.s, #31
; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0
; CHECK-NEXT: mov z1.s, #0 // =0x0
-; CHECK-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: ldr d0, [sp]
-; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: lsl z0.s, z0.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2]
; CHECK-NEXT: st1w { z1.s }, p0, [x0]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: masked_store_v8f32:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index b91f813..8b296d9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -9,65 +9,44 @@ target triple = "aarch64-unknown-linux-gnu"
define void @zip1_v32i8(ptr %a, ptr %b) {
; CHECK-LABEL: zip1_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1, #16]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: mov z2.b, z0.b[15]
-; CHECK-NEXT: mov z3.b, z0.b[14]
-; CHECK-NEXT: mov z4.b, z0.b[13]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov z3.b, z0.b[11]
-; CHECK-NEXT: mov z2.b, z0.b[12]
-; CHECK-NEXT: strb w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z0.b[10]
-; CHECK-NEXT: strb w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.b, z0.b[9]
-; CHECK-NEXT: strb w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z0.b[8]
-; CHECK-NEXT: strb w9, [sp, #8]
+; CHECK-NEXT: mov z4.b, z0.b[14]
+; CHECK-NEXT: mov z6.b, z0.b[13]
+; CHECK-NEXT: mov z3.b, z1.b[15]
+; CHECK-NEXT: mov z5.b, z1.b[14]
+; CHECK-NEXT: mov z7.b, z1.b[13]
+; CHECK-NEXT: mov z16.b, z0.b[12]
+; CHECK-NEXT: mov z17.b, z1.b[12]
+; CHECK-NEXT: mov z18.b, z0.b[11]
+; CHECK-NEXT: mov z19.b, z1.b[11]
+; CHECK-NEXT: mov z20.b, z0.b[10]
+; CHECK-NEXT: mov z21.b, z1.b[10]
+; CHECK-NEXT: mov z22.b, z0.b[9]
+; CHECK-NEXT: mov z23.b, z1.b[9]
+; CHECK-NEXT: mov z24.b, z0.b[8]
+; CHECK-NEXT: mov z25.b, z1.b[8]
+; CHECK-NEXT: zip1 z2.b, z2.b, z3.b
+; CHECK-NEXT: zip1 z3.b, z4.b, z5.b
+; CHECK-NEXT: zip1 z4.b, z6.b, z7.b
+; CHECK-NEXT: zip1 z5.b, z16.b, z17.b
+; CHECK-NEXT: zip1 z6.b, z18.b, z19.b
+; CHECK-NEXT: zip1 z7.b, z20.b, z21.b
+; CHECK-NEXT: zip1 z16.b, z22.b, z23.b
; CHECK-NEXT: zip1 z0.b, z0.b, z1.b
-; CHECK-NEXT: strb w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[15]
-; CHECK-NEXT: strb w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[14]
-; CHECK-NEXT: strb w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[13]
-; CHECK-NEXT: strb w8, [sp]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[12]
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[11]
-; CHECK-NEXT: strb w8, [sp, #13]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[10]
-; CHECK-NEXT: strb w8, [sp, #11]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[9]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: strb w8, [sp, #9]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[8]
-; CHECK-NEXT: strb w9, [sp, #5]
-; CHECK-NEXT: strb w8, [sp, #7]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strb w8, [sp, #3]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strb w8, [sp, #1]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z17.b, z24.b, z25.b
+; CHECK-NEXT: zip1 z2.h, z3.h, z2.h
+; CHECK-NEXT: zip1 z3.h, z5.h, z4.h
+; CHECK-NEXT: zip1 z4.h, z7.h, z6.h
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z5.h, z17.h, z16.h
+; CHECK-NEXT: zip1 z2.s, z3.s, z2.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip1_v32i8:
@@ -159,123 +138,97 @@ define void @zip1_v32i8(ptr %a, ptr %b) {
define void @zip_v32i16(ptr %a, ptr %b) {
; CHECK-LABEL: zip_v32i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
+; CHECK-NEXT: stp d15, d14, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: ldp q1, q3, [x1]
-; CHECK-NEXT: ldp q0, q4, [x0]
-; CHECK-NEXT: ldp q2, q5, [x0, #32]
-; CHECK-NEXT: mov z16.h, z3.h[7]
-; CHECK-NEXT: mov z18.h, z3.h[6]
-; CHECK-NEXT: mov z17.h, z4.h[7]
-; CHECK-NEXT: ldp q6, q7, [x1, #32]
-; CHECK-NEXT: mov z19.h, z4.h[6]
-; CHECK-NEXT: fmov w8, s16
+; CHECK-NEXT: .cfi_offset b8, -8
+; CHECK-NEXT: .cfi_offset b9, -16
+; CHECK-NEXT: .cfi_offset b10, -24
+; CHECK-NEXT: .cfi_offset b11, -32
+; CHECK-NEXT: .cfi_offset b12, -40
+; CHECK-NEXT: .cfi_offset b13, -48
+; CHECK-NEXT: .cfi_offset b14, -56
+; CHECK-NEXT: .cfi_offset b15, -64
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: ldp q2, q3, [x1]
+; CHECK-NEXT: mov z5.h, z1.h[7]
+; CHECK-NEXT: mov z7.h, z1.h[6]
+; CHECK-NEXT: mov z17.h, z1.h[5]
+; CHECK-NEXT: mov z4.h, z3.h[7]
+; CHECK-NEXT: mov z6.h, z3.h[6]
; CHECK-NEXT: mov z16.h, z3.h[5]
-; CHECK-NEXT: fmov w9, s17
-; CHECK-NEXT: mov z17.h, z4.h[5]
-; CHECK-NEXT: mov z20.h, z7.h[6]
-; CHECK-NEXT: strh w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s18
+; CHECK-NEXT: mov z20.h, z2.h[7]
+; CHECK-NEXT: mov z21.h, z0.h[7]
; CHECK-NEXT: mov z18.h, z3.h[4]
-; CHECK-NEXT: strh w9, [sp, #28]
-; CHECK-NEXT: fmov w9, s19
-; CHECK-NEXT: mov z19.h, z5.h[7]
-; CHECK-NEXT: zip1 z3.h, z4.h, z3.h
-; CHECK-NEXT: strh w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: mov z16.h, z4.h[4]
-; CHECK-NEXT: strh w9, [sp, #24]
-; CHECK-NEXT: zip1 z4.h, z5.h, z7.h
-; CHECK-NEXT: strh w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: mov z17.h, z1.h[7]
-; CHECK-NEXT: add z3.h, z3.h, z4.h
-; CHECK-NEXT: strh w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z0.h[7]
-; CHECK-NEXT: strh w8, [sp, #18]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: mov z16.h, z1.h[6]
-; CHECK-NEXT: strh w8, [sp, #16]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: mov z17.h, z0.h[6]
-; CHECK-NEXT: strh w8, [sp, #62]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z1.h[5]
-; CHECK-NEXT: strh w8, [sp, #60]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: mov z16.h, z0.h[5]
-; CHECK-NEXT: strh w8, [sp, #58]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: mov z17.h, z1.h[4]
-; CHECK-NEXT: strh w8, [sp, #56]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z0.h[4]
-; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
-; CHECK-NEXT: zip1 z1.h, z2.h, z6.h
-; CHECK-NEXT: strh w8, [sp, #54]
-; CHECK-NEXT: fmov w8, s16
-; CHECK-NEXT: ldr q16, [sp, #16]
-; CHECK-NEXT: add z0.h, z0.h, z1.h
-; CHECK-NEXT: strh w8, [sp, #52]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: strh w8, [sp, #50]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z7.h[7]
-; CHECK-NEXT: strh w8, [sp, #48]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z5.h[6]
-; CHECK-NEXT: ldr q17, [sp, #48]
-; CHECK-NEXT: strh w8, [sp, #46]
-; CHECK-NEXT: fmov w8, s19
-; CHECK-NEXT: mov z19.h, z7.h[5]
-; CHECK-NEXT: strh w8, [sp, #44]
-; CHECK-NEXT: fmov w8, s20
-; CHECK-NEXT: mov z20.h, z5.h[5]
-; CHECK-NEXT: strh w8, [sp, #42]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z7.h[4]
-; CHECK-NEXT: strh w8, [sp, #40]
-; CHECK-NEXT: fmov w8, s19
-; CHECK-NEXT: mov z19.h, z5.h[4]
-; CHECK-NEXT: strh w8, [sp, #38]
-; CHECK-NEXT: fmov w8, s20
-; CHECK-NEXT: mov z20.h, z6.h[7]
-; CHECK-NEXT: strh w8, [sp, #36]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z2.h[7]
-; CHECK-NEXT: strh w8, [sp, #34]
-; CHECK-NEXT: fmov w8, s19
-; CHECK-NEXT: mov z19.h, z6.h[6]
-; CHECK-NEXT: strh w8, [sp, #32]
-; CHECK-NEXT: fmov w8, s20
-; CHECK-NEXT: mov z20.h, z2.h[6]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z6.h[5]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s19
-; CHECK-NEXT: mov z19.h, z2.h[5]
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s20
-; CHECK-NEXT: mov z20.h, z6.h[4]
-; CHECK-NEXT: fmov w9, s19
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: mov z18.h, z2.h[4]
-; CHECK-NEXT: strh w9, [sp, #4]
-; CHECK-NEXT: ldr q2, [sp, #32]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s20
-; CHECK-NEXT: fmov w9, s18
-; CHECK-NEXT: add z2.h, z16.h, z2.h
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: ldr q4, [sp]
-; CHECK-NEXT: stp q3, q2, [x0, #32]
-; CHECK-NEXT: add z1.h, z17.h, z4.h
-; CHECK-NEXT: stp q0, q1, [x0]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: mov z19.h, z1.h[4]
+; CHECK-NEXT: mov z22.h, z2.h[6]
+; CHECK-NEXT: mov z23.h, z0.h[6]
+; CHECK-NEXT: zip1 z24.h, z5.h, z4.h
+; CHECK-NEXT: zip1 z25.h, z7.h, z6.h
+; CHECK-NEXT: zip1 z17.h, z17.h, z16.h
+; CHECK-NEXT: ldp q4, q6, [x0, #32]
+; CHECK-NEXT: zip1 z16.h, z21.h, z20.h
+; CHECK-NEXT: ldp q5, q7, [x1, #32]
+; CHECK-NEXT: zip1 z18.h, z19.h, z18.h
+; CHECK-NEXT: zip1 z19.s, z25.s, z24.s
+; CHECK-NEXT: zip1 z22.h, z23.h, z22.h
+; CHECK-NEXT: mov z23.h, z2.h[5]
+; CHECK-NEXT: mov z21.h, z6.h[7]
+; CHECK-NEXT: mov z24.h, z0.h[5]
+; CHECK-NEXT: mov z25.h, z2.h[4]
+; CHECK-NEXT: mov z20.h, z7.h[7]
+; CHECK-NEXT: mov z26.h, z0.h[4]
+; CHECK-NEXT: mov z27.h, z6.h[6]
+; CHECK-NEXT: mov z28.h, z7.h[5]
+; CHECK-NEXT: mov z29.h, z6.h[5]
+; CHECK-NEXT: mov z30.h, z7.h[4]
+; CHECK-NEXT: mov z31.h, z6.h[4]
+; CHECK-NEXT: mov z8.h, z5.h[7]
+; CHECK-NEXT: mov z9.h, z4.h[7]
+; CHECK-NEXT: zip1 z20.h, z21.h, z20.h
+; CHECK-NEXT: mov z21.h, z7.h[6]
+; CHECK-NEXT: mov z10.h, z5.h[6]
+; CHECK-NEXT: mov z11.h, z4.h[6]
+; CHECK-NEXT: mov z12.h, z5.h[5]
+; CHECK-NEXT: mov z13.h, z4.h[5]
+; CHECK-NEXT: mov z14.h, z5.h[4]
+; CHECK-NEXT: mov z15.h, z4.h[4]
+; CHECK-NEXT: zip1 z23.h, z24.h, z23.h
+; CHECK-NEXT: zip1 z21.h, z27.h, z21.h
+; CHECK-NEXT: zip1 z27.h, z29.h, z28.h
+; CHECK-NEXT: zip1 z28.h, z31.h, z30.h
+; CHECK-NEXT: zip1 z24.h, z26.h, z25.h
+; CHECK-NEXT: zip1 z25.h, z9.h, z8.h
+; CHECK-NEXT: zip1 z26.h, z11.h, z10.h
+; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: zip1 z29.h, z13.h, z12.h
+; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: zip1 z30.h, z15.h, z14.h
+; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: zip1 z17.s, z18.s, z17.s
+; CHECK-NEXT: zip1 z18.s, z21.s, z20.s
+; CHECK-NEXT: zip1 z20.s, z28.s, z27.s
+; CHECK-NEXT: zip1 z16.s, z22.s, z16.s
+; CHECK-NEXT: zip1 z21.s, z24.s, z23.s
+; CHECK-NEXT: zip1 z1.h, z1.h, z3.h
+; CHECK-NEXT: zip1 z3.s, z26.s, z25.s
+; CHECK-NEXT: zip1 z22.s, z30.s, z29.s
+; CHECK-NEXT: zip1 z6.h, z6.h, z7.h
+; CHECK-NEXT: zip1 z7.d, z17.d, z19.d
+; CHECK-NEXT: zip1 z17.d, z20.d, z18.d
+; CHECK-NEXT: zip1 z0.h, z0.h, z2.h
+; CHECK-NEXT: zip1 z2.h, z4.h, z5.h
+; CHECK-NEXT: zip1 z4.d, z21.d, z16.d
+; CHECK-NEXT: zip1 z3.d, z22.d, z3.d
+; CHECK-NEXT: add z1.h, z1.h, z6.h
+; CHECK-NEXT: add z5.h, z7.h, z17.h
+; CHECK-NEXT: add z0.h, z0.h, z2.h
+; CHECK-NEXT: add z2.h, z4.h, z3.h
+; CHECK-NEXT: stp q1, q5, [x0, #32]
+; CHECK-NEXT: stp q0, q2, [x0]
+; CHECK-NEXT: ldp d15, d14, [sp], #64 // 16-byte Folded Reload
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_v32i16:
@@ -436,41 +389,28 @@ define void @zip_v32i16(ptr %a, ptr %b) {
define void @zip1_v16i16(ptr %a, ptr %b) {
; CHECK-LABEL: zip1_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1, #16]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: mov z2.h, z0.h[7]
-; CHECK-NEXT: mov z3.h, z0.h[6]
-; CHECK-NEXT: mov z4.h, z0.h[5]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.h, z0.h[4]
-; CHECK-NEXT: fmov w9, s3
+; CHECK-NEXT: mov z4.h, z0.h[6]
+; CHECK-NEXT: mov z6.h, z0.h[5]
; CHECK-NEXT: mov z3.h, z1.h[7]
+; CHECK-NEXT: mov z5.h, z1.h[6]
+; CHECK-NEXT: mov z7.h, z1.h[5]
+; CHECK-NEXT: mov z16.h, z0.h[4]
+; CHECK-NEXT: mov z17.h, z1.h[4]
; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z1.h[6]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.h, z1.h[5]
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.h, z1.h[4]
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp, #10]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z2.h, z2.h, z3.h
+; CHECK-NEXT: zip1 z3.h, z4.h, z5.h
+; CHECK-NEXT: zip1 z4.h, z6.h, z7.h
+; CHECK-NEXT: zip1 z5.h, z16.h, z17.h
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z2.s, z3.s, z2.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip1_v16i16:
@@ -530,8 +470,6 @@ define void @zip1_v16i16(ptr %a, ptr %b) {
define void @zip1_v8i32(ptr %a, ptr %b) {
; CHECK-LABEL: zip1_v8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1, #16]
@@ -539,18 +477,13 @@ define void @zip1_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: mov z2.s, z0.s[3]
; CHECK-NEXT: mov z4.s, z0.s[2]
; CHECK-NEXT: mov z3.s, z1.s[3]
+; CHECK-NEXT: mov z5.s, z1.s[2]
; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z1.s[2]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: stp w8, w9, [sp, #8]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: stp w8, w9, [sp]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z2.s, z2.s, z3.s
+; CHECK-NEXT: zip1 z3.s, z4.s, z5.s
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip1_v8i32:
@@ -636,25 +569,18 @@ define void @zip_v4f64(ptr %a, ptr %b) {
define void @zip_v4i32(ptr %a, ptr %b) {
; CHECK-LABEL: zip_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: mov z2.s, z0.s[3]
; CHECK-NEXT: mov z3.s, z1.s[3]
; CHECK-NEXT: mov z4.s, z0.s[2]
+; CHECK-NEXT: mov z5.s, z1.s[2]
; CHECK-NEXT: zip1 z0.s, z1.s, z0.s
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z1.s[2]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: stp w9, w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: stp w9, w8, [sp]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z2.s, z3.s, z2.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: add z0.s, z0.s, z1.s
; CHECK-NEXT: str q0, [x0]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_v4i32:
@@ -1209,65 +1135,44 @@ define void @trn_v8i32_undef(ptr %a) {
define void @zip2_v32i8(ptr %a, ptr %b) #0{
; CHECK-LABEL: zip2_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: ldr q1, [x1, #16]
; CHECK-NEXT: mov z2.b, z0.b[15]
-; CHECK-NEXT: mov z3.b, z0.b[14]
-; CHECK-NEXT: mov z4.b, z0.b[13]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov z3.b, z0.b[11]
-; CHECK-NEXT: mov z2.b, z0.b[12]
-; CHECK-NEXT: strb w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z0.b[10]
-; CHECK-NEXT: strb w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.b, z0.b[9]
-; CHECK-NEXT: strb w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z0.b[8]
-; CHECK-NEXT: strb w9, [sp, #8]
+; CHECK-NEXT: mov z4.b, z0.b[14]
+; CHECK-NEXT: mov z6.b, z0.b[13]
+; CHECK-NEXT: mov z3.b, z1.b[15]
+; CHECK-NEXT: mov z5.b, z1.b[14]
+; CHECK-NEXT: mov z7.b, z1.b[13]
+; CHECK-NEXT: mov z16.b, z0.b[12]
+; CHECK-NEXT: mov z17.b, z1.b[12]
+; CHECK-NEXT: mov z18.b, z0.b[11]
+; CHECK-NEXT: mov z19.b, z1.b[11]
+; CHECK-NEXT: mov z20.b, z0.b[10]
+; CHECK-NEXT: mov z21.b, z1.b[10]
+; CHECK-NEXT: mov z22.b, z0.b[9]
+; CHECK-NEXT: mov z23.b, z1.b[9]
+; CHECK-NEXT: mov z24.b, z0.b[8]
+; CHECK-NEXT: mov z25.b, z1.b[8]
+; CHECK-NEXT: zip1 z2.b, z2.b, z3.b
+; CHECK-NEXT: zip1 z3.b, z4.b, z5.b
+; CHECK-NEXT: zip1 z4.b, z6.b, z7.b
+; CHECK-NEXT: zip1 z5.b, z16.b, z17.b
+; CHECK-NEXT: zip1 z6.b, z18.b, z19.b
+; CHECK-NEXT: zip1 z7.b, z20.b, z21.b
+; CHECK-NEXT: zip1 z16.b, z22.b, z23.b
; CHECK-NEXT: zip1 z0.b, z0.b, z1.b
-; CHECK-NEXT: strb w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[15]
-; CHECK-NEXT: strb w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[14]
-; CHECK-NEXT: strb w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[13]
-; CHECK-NEXT: strb w8, [sp]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[12]
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[11]
-; CHECK-NEXT: strb w8, [sp, #13]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[10]
-; CHECK-NEXT: strb w8, [sp, #11]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[9]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: strb w8, [sp, #9]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[8]
-; CHECK-NEXT: strb w9, [sp, #5]
-; CHECK-NEXT: strb w8, [sp, #7]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strb w8, [sp, #3]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strb w8, [sp, #1]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z17.b, z24.b, z25.b
+; CHECK-NEXT: zip1 z2.h, z3.h, z2.h
+; CHECK-NEXT: zip1 z3.h, z5.h, z4.h
+; CHECK-NEXT: zip1 z4.h, z7.h, z6.h
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z5.h, z17.h, z16.h
+; CHECK-NEXT: zip1 z2.s, z3.s, z2.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip2_v32i8:
@@ -1359,41 +1264,28 @@ define void @zip2_v32i8(ptr %a, ptr %b) #0{
define void @zip2_v16i16(ptr %a, ptr %b) #0{
; CHECK-LABEL: zip2_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: ldr q1, [x1, #16]
; CHECK-NEXT: mov z2.h, z0.h[7]
-; CHECK-NEXT: mov z3.h, z0.h[6]
-; CHECK-NEXT: mov z4.h, z0.h[5]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.h, z0.h[4]
-; CHECK-NEXT: fmov w9, s3
+; CHECK-NEXT: mov z4.h, z0.h[6]
+; CHECK-NEXT: mov z6.h, z0.h[5]
; CHECK-NEXT: mov z3.h, z1.h[7]
+; CHECK-NEXT: mov z5.h, z1.h[6]
+; CHECK-NEXT: mov z7.h, z1.h[5]
+; CHECK-NEXT: mov z16.h, z0.h[4]
+; CHECK-NEXT: mov z17.h, z1.h[4]
; CHECK-NEXT: zip1 z0.h, z0.h, z1.h
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z1.h[6]
-; CHECK-NEXT: strh w9, [sp, #8]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.h, z1.h[5]
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.h, z1.h[4]
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w9, [sp, #10]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z2.h, z2.h, z3.h
+; CHECK-NEXT: zip1 z3.h, z4.h, z5.h
+; CHECK-NEXT: zip1 z4.h, z6.h, z7.h
+; CHECK-NEXT: zip1 z5.h, z16.h, z17.h
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z2.s, z3.s, z2.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip2_v16i16:
@@ -1453,8 +1345,6 @@ define void @zip2_v16i16(ptr %a, ptr %b) #0{
define void @zip2_v8i32(ptr %a, ptr %b) #0{
; CHECK-LABEL: zip2_v8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q0, [x0, #16]
; CHECK-NEXT: ldr q1, [x1]
@@ -1462,18 +1352,13 @@ define void @zip2_v8i32(ptr %a, ptr %b) #0{
; CHECK-NEXT: mov z2.s, z0.s[3]
; CHECK-NEXT: mov z4.s, z0.s[2]
; CHECK-NEXT: mov z3.s, z1.s[3]
+; CHECK-NEXT: mov z5.s, z1.s[2]
; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.s, z1.s[2]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: stp w8, w9, [sp, #8]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: stp w8, w9, [sp]
-; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: zip1 z2.s, z2.s, z3.s
+; CHECK-NEXT: zip1 z3.s, z4.s, z5.s
; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: str q1, [x0, #16]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip2_v8i32:
@@ -1547,197 +1432,139 @@ define void @zip2_v8i32_undef(ptr %a) #0{
define void @uzp_v32i8(ptr %a, ptr %b) #0{
; CHECK-LABEL: uzp_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: ldp q2, q3, [x0]
-; CHECK-NEXT: ldp q0, q1, [x1]
-; CHECK-NEXT: mov z4.b, z3.b[14]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z6.b, z3.b[10]
-; CHECK-NEXT: mov z5.b, z3.b[12]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z7.b, z3.b[8]
-; CHECK-NEXT: mov z17.b, z3.b[9]
-; CHECK-NEXT: mov z18.b, z3.b[7]
-; CHECK-NEXT: mov z16.b, z3.b[11]
-; CHECK-NEXT: strb w8, [sp, #40]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z3.b[6]
-; CHECK-NEXT: strb w9, [sp, #32]
-; CHECK-NEXT: fmov w9, s5
-; CHECK-NEXT: mov z5.b, z3.b[4]
-; CHECK-NEXT: strb w8, [sp, #47]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z3.b[2]
-; CHECK-NEXT: strb w9, [sp, #46]
-; CHECK-NEXT: fmov w9, s7
-; CHECK-NEXT: mov z7.b, z2.b[14]
-; CHECK-NEXT: strb w8, [sp, #45]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z2.b[12]
-; CHECK-NEXT: strb w9, [sp, #44]
-; CHECK-NEXT: fmov w9, s16
-; CHECK-NEXT: mov z16.b, z2.b[11]
-; CHECK-NEXT: strb w8, [sp, #43]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z2.b[10]
-; CHECK-NEXT: strb w9, [sp, #61]
-; CHECK-NEXT: fmov w9, s16
-; CHECK-NEXT: strb w8, [sp, #42]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z2.b[8]
-; CHECK-NEXT: strb w9, [sp, #53]
-; CHECK-NEXT: strb w8, [sp, #41]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z2.b[6]
-; CHECK-NEXT: strb w8, [sp, #39]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z2.b[4]
-; CHECK-NEXT: strb w8, [sp, #38]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z2.b[2]
-; CHECK-NEXT: strb w8, [sp, #37]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z1.b[10]
-; CHECK-NEXT: strb w8, [sp, #36]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z1.b[8]
-; CHECK-NEXT: strb w8, [sp, #35]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[14]
-; CHECK-NEXT: strb w8, [sp, #34]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z1.b[12]
-; CHECK-NEXT: strb w8, [sp, #33]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strb w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strb w8, [sp]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z1.b[6]
-; CHECK-NEXT: strb w8, [sp, #15]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z1.b[4]
-; CHECK-NEXT: strb w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z1.b[2]
-; CHECK-NEXT: strb w8, [sp, #13]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z0.b[14]
-; CHECK-NEXT: strb w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z0.b[12]
-; CHECK-NEXT: strb w8, [sp, #11]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z0.b[10]
-; CHECK-NEXT: strb w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z0.b[8]
-; CHECK-NEXT: strb w8, [sp, #9]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z0.b[6]
-; CHECK-NEXT: strb w8, [sp, #7]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.b, z0.b[4]
-; CHECK-NEXT: strb w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.b, z0.b[2]
-; CHECK-NEXT: strb w8, [sp, #5]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z3.b[15]
-; CHECK-NEXT: strb w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z3.b[13]
-; CHECK-NEXT: strb w8, [sp, #3]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: ldr q4, [sp, #32]
-; CHECK-NEXT: strb w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: strb w8, [sp, #1]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z3.b[5]
-; CHECK-NEXT: mov z3.b, z3.b[3]
-; CHECK-NEXT: ldr q5, [sp]
-; CHECK-NEXT: strb w8, [sp, #63]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z2.b[13]
-; CHECK-NEXT: strb w8, [sp, #62]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: strb w8, [sp, #60]
-; CHECK-NEXT: fmov w8, s18
-; CHECK-NEXT: strb w8, [sp, #59]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z2.b[9]
-; CHECK-NEXT: strb w8, [sp, #58]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z2.b[5]
-; CHECK-NEXT: strb w8, [sp, #57]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z2.b[3]
+; CHECK-NEXT: stp d13, d12, [sp, #-48]! // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset b8, -8
+; CHECK-NEXT: .cfi_offset b9, -16
+; CHECK-NEXT: .cfi_offset b10, -24
+; CHECK-NEXT: .cfi_offset b11, -32
+; CHECK-NEXT: .cfi_offset b12, -40
+; CHECK-NEXT: .cfi_offset b13, -48
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: mov z2.b, z1.b[14]
+; CHECK-NEXT: mov z3.b, z1.b[12]
+; CHECK-NEXT: mov z4.b, z1.b[10]
+; CHECK-NEXT: mov z5.b, z1.b[8]
+; CHECK-NEXT: mov z6.b, z1.b[6]
+; CHECK-NEXT: mov z7.b, z1.b[4]
+; CHECK-NEXT: mov z16.b, z1.b[2]
+; CHECK-NEXT: mov z18.b, z0.b[14]
+; CHECK-NEXT: mov z19.b, z0.b[12]
+; CHECK-NEXT: zip1 z3.b, z3.b, z2.b
+; CHECK-NEXT: ldp q2, q17, [x1]
+; CHECK-NEXT: mov z20.b, z0.b[10]
+; CHECK-NEXT: zip1 z4.b, z5.b, z4.b
+; CHECK-NEXT: zip1 z5.b, z7.b, z6.b
+; CHECK-NEXT: zip1 z6.b, z1.b, z16.b
+; CHECK-NEXT: mov z7.b, z0.b[8]
+; CHECK-NEXT: mov z16.b, z0.b[6]
+; CHECK-NEXT: mov z21.b, z0.b[4]
+; CHECK-NEXT: mov z22.b, z0.b[2]
+; CHECK-NEXT: mov z23.b, z17.b[14]
+; CHECK-NEXT: mov z24.b, z17.b[12]
+; CHECK-NEXT: mov z25.b, z17.b[10]
+; CHECK-NEXT: mov z26.b, z17.b[8]
+; CHECK-NEXT: mov z27.b, z17.b[6]
+; CHECK-NEXT: mov z28.b, z17.b[4]
+; CHECK-NEXT: mov z29.b, z17.b[2]
+; CHECK-NEXT: zip1 z18.b, z19.b, z18.b
+; CHECK-NEXT: zip1 z7.b, z7.b, z20.b
+; CHECK-NEXT: zip1 z16.b, z21.b, z16.b
+; CHECK-NEXT: zip1 z19.b, z0.b, z22.b
+; CHECK-NEXT: zip1 z20.b, z24.b, z23.b
+; CHECK-NEXT: zip1 z21.b, z26.b, z25.b
+; CHECK-NEXT: zip1 z22.b, z28.b, z27.b
+; CHECK-NEXT: mov z24.b, z2.b[14]
+; CHECK-NEXT: mov z25.b, z2.b[12]
+; CHECK-NEXT: mov z26.b, z2.b[10]
+; CHECK-NEXT: mov z27.b, z2.b[8]
+; CHECK-NEXT: zip1 z23.b, z17.b, z29.b
+; CHECK-NEXT: zip1 z3.h, z4.h, z3.h
+; CHECK-NEXT: zip1 z4.h, z6.h, z5.h
+; CHECK-NEXT: zip1 z5.h, z7.h, z18.h
+; CHECK-NEXT: zip1 z6.h, z19.h, z16.h
+; CHECK-NEXT: zip1 z7.h, z21.h, z20.h
+; CHECK-NEXT: zip1 z18.b, z25.b, z24.b
+; CHECK-NEXT: zip1 z19.b, z27.b, z26.b
+; CHECK-NEXT: mov z20.b, z2.b[6]
+; CHECK-NEXT: mov z21.b, z2.b[4]
+; CHECK-NEXT: mov z29.b, z17.b[3]
+; CHECK-NEXT: mov z30.b, z17.b[1]
+; CHECK-NEXT: mov z31.b, z2.b[15]
+; CHECK-NEXT: mov z8.b, z2.b[13]
+; CHECK-NEXT: zip1 z16.h, z23.h, z22.h
+; CHECK-NEXT: mov z22.b, z2.b[2]
+; CHECK-NEXT: mov z23.b, z17.b[15]
+; CHECK-NEXT: mov z24.b, z17.b[13]
+; CHECK-NEXT: mov z25.b, z17.b[11]
+; CHECK-NEXT: mov z26.b, z17.b[9]
+; CHECK-NEXT: mov z27.b, z17.b[7]
+; CHECK-NEXT: mov z28.b, z17.b[5]
+; CHECK-NEXT: zip1 z17.h, z19.h, z18.h
+; CHECK-NEXT: zip1 z21.b, z21.b, z20.b
+; CHECK-NEXT: zip1 z19.b, z30.b, z29.b
+; CHECK-NEXT: zip1 z20.b, z8.b, z31.b
+; CHECK-NEXT: mov z29.b, z1.b[15]
+; CHECK-NEXT: mov z30.b, z1.b[13]
+; CHECK-NEXT: mov z31.b, z1.b[11]
+; CHECK-NEXT: mov z8.b, z1.b[9]
+; CHECK-NEXT: zip1 z22.b, z2.b, z22.b
+; CHECK-NEXT: zip1 z23.b, z24.b, z23.b
+; CHECK-NEXT: zip1 z24.b, z26.b, z25.b
+; CHECK-NEXT: zip1 z18.b, z28.b, z27.b
+; CHECK-NEXT: mov z25.b, z2.b[11]
+; CHECK-NEXT: mov z26.b, z2.b[9]
+; CHECK-NEXT: mov z27.b, z2.b[7]
+; CHECK-NEXT: mov z28.b, z2.b[5]
+; CHECK-NEXT: mov z9.b, z1.b[7]
+; CHECK-NEXT: mov z10.b, z1.b[5]
+; CHECK-NEXT: mov z1.b, z1.b[3]
+; CHECK-NEXT: mov z11.b, z0.b[11]
+; CHECK-NEXT: mov z12.b, z0.b[9]
+; CHECK-NEXT: zip1 z29.b, z30.b, z29.b
+; CHECK-NEXT: mov z30.b, z0.b[3]
+; CHECK-NEXT: mov z13.b, z0.b[1]
+; CHECK-NEXT: zip1 z31.b, z8.b, z31.b
+; CHECK-NEXT: mov z8.b, z2.b[3]
; CHECK-NEXT: mov z2.b, z2.b[1]
-; CHECK-NEXT: strb w8, [sp, #54]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z1.b[15]
-; CHECK-NEXT: strb w8, [sp, #52]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[13]
-; CHECK-NEXT: strb w8, [sp, #50]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z1.b[11]
-; CHECK-NEXT: strb w8, [sp, #49]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z1.b[9]
-; CHECK-NEXT: strb w8, [sp, #48]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z1.b[7]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.b, z0.b[15]
-; CHECK-NEXT: strb w8, [sp, #31]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z1.b[5]
-; CHECK-NEXT: strb w9, [sp, #28]
-; CHECK-NEXT: strb w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.b, z1.b[3]
-; CHECK-NEXT: mov z1.b, z1.b[1]
-; CHECK-NEXT: strb w8, [sp, #29]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z0.b[11]
-; CHECK-NEXT: strb w8, [sp, #27]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z0.b[13]
-; CHECK-NEXT: strb w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: strb w8, [sp, #25]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z1.b, z0.b[9]
-; CHECK-NEXT: strb w8, [sp, #24]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.b, z0.b[7]
-; CHECK-NEXT: strb w8, [sp, #23]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z3.b, z0.b[5]
-; CHECK-NEXT: strb w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.b, z0.b[3]
-; CHECK-NEXT: mov z0.b, z0.b[1]
-; CHECK-NEXT: strb w8, [sp, #21]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strb w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strb w8, [sp, #19]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strb w8, [sp, #18]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: strb w8, [sp, #17]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: ldr q0, [sp, #48]
-; CHECK-NEXT: add z0.b, z4.b, z0.b
-; CHECK-NEXT: strb w8, [sp, #16]
-; CHECK-NEXT: ldr q1, [sp, #16]
-; CHECK-NEXT: add z1.b, z5.b, z1.b
+; CHECK-NEXT: zip1 z9.b, z10.b, z9.b
+; CHECK-NEXT: zip1 z10.b, z12.b, z11.b
+; CHECK-NEXT: zip1 z1.b, z0.b, z1.b
+; CHECK-NEXT: zip1 z30.b, z13.b, z30.b
+; CHECK-NEXT: mov z11.b, z0.b[13]
+; CHECK-NEXT: mov z0.b, z0.b[5]
+; CHECK-NEXT: zip1 z25.b, z26.b, z25.b
+; CHECK-NEXT: zip1 z26.b, z28.b, z27.b
+; CHECK-NEXT: zip1 z2.b, z2.b, z8.b
+; CHECK-NEXT: zip1 z21.h, z22.h, z21.h
+; CHECK-NEXT: zip1 z22.h, z24.h, z23.h
+; CHECK-NEXT: zip1 z23.h, z31.h, z29.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z9.h
+; CHECK-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: zip1 z24.h, z10.h, z11.h
+; CHECK-NEXT: ldp d11, d10, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: zip1 z0.h, z30.h, z0.h
+; CHECK-NEXT: zip1 z18.h, z19.h, z18.h
+; CHECK-NEXT: zip1 z19.h, z25.h, z20.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z26.h
+; CHECK-NEXT: zip1 z3.s, z4.s, z3.s
+; CHECK-NEXT: zip1 z4.s, z6.s, z5.s
+; CHECK-NEXT: zip1 z5.s, z16.s, z7.s
+; CHECK-NEXT: zip1 z1.s, z1.s, z23.s
+; CHECK-NEXT: zip1 z6.s, z21.s, z17.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z24.s
+; CHECK-NEXT: zip1 z7.s, z18.s, z22.s
+; CHECK-NEXT: zip1 z2.s, z2.s, z19.s
+; CHECK-NEXT: zip1 z3.d, z4.d, z3.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: zip1 z1.d, z6.d, z5.d
+; CHECK-NEXT: zip1 z2.d, z2.d, z7.d
+; CHECK-NEXT: add z0.b, z3.b, z0.b
+; CHECK-NEXT: add z1.b, z1.b, z2.b
; CHECK-NEXT: stp q0, q1, [x0]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: ldp d13, d12, [sp], #48 // 16-byte Folded Reload
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: uzp_v32i8:
@@ -1922,110 +1749,71 @@ define void @uzp_v4i16(ptr %a, ptr %b) #0{
define void @uzp_v16i16(ptr %a, ptr %b) #0{
; CHECK-LABEL: uzp_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: ldp q2, q3, [x0]
-; CHECK-NEXT: ldp q0, q1, [x1]
-; CHECK-NEXT: mov z4.h, z3.h[6]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: mov z6.h, z3.h[2]
-; CHECK-NEXT: mov z5.h, z3.h[4]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z7.h, z2.h[6]
-; CHECK-NEXT: mov z17.h, z2.h[7]
-; CHECK-NEXT: mov z16.h, z3.h[1]
-; CHECK-NEXT: strh w8, [sp, #40]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z2.h[4]
-; CHECK-NEXT: strh w9, [sp, #32]
-; CHECK-NEXT: fmov w9, s5
-; CHECK-NEXT: mov z5.h, z2.h[2]
-; CHECK-NEXT: strh w8, [sp, #46]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.h, z1.h[2]
-; CHECK-NEXT: strh w9, [sp, #44]
-; CHECK-NEXT: fmov w9, s7
-; CHECK-NEXT: mov z7.h, z0.h[6]
-; CHECK-NEXT: strh w8, [sp, #42]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z1.h[6]
-; CHECK-NEXT: strh w9, [sp, #38]
-; CHECK-NEXT: fmov w9, s16
-; CHECK-NEXT: strh w8, [sp, #36]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z1.h[4]
-; CHECK-NEXT: strh w9, [sp, #56]
-; CHECK-NEXT: strh w8, [sp, #34]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z0.h[4]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z0.h[2]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.h, z3.h[7]
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.h, z3.h[5]
-; CHECK-NEXT: strh w8, [sp, #6]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z3.h[3]
-; CHECK-NEXT: ldr q3, [sp, #32]
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.h, z2.h[5]
-; CHECK-NEXT: ldr q4, [sp]
-; CHECK-NEXT: strh w8, [sp, #62]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: mov z7.h, z1.h[7]
-; CHECK-NEXT: strh w8, [sp, #60]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z2.h[3]
-; CHECK-NEXT: mov z2.h, z2.h[1]
-; CHECK-NEXT: strh w8, [sp, #58]
-; CHECK-NEXT: fmov w8, s17
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.h, z0.h[7]
-; CHECK-NEXT: strh w8, [sp, #54]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.h, z1.h[5]
-; CHECK-NEXT: strh w9, [sp, #48]
-; CHECK-NEXT: strh w8, [sp, #52]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z1.h[3]
+; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset b8, -16
+; CHECK-NEXT: ldp q1, q6, [x0]
+; CHECK-NEXT: ldp q0, q2, [x1]
+; CHECK-NEXT: mov z3.h, z6.h[6]
+; CHECK-NEXT: mov z4.h, z6.h[4]
+; CHECK-NEXT: mov z5.h, z6.h[2]
+; CHECK-NEXT: mov z7.h, z1.h[6]
+; CHECK-NEXT: mov z16.h, z1.h[4]
+; CHECK-NEXT: mov z17.h, z1.h[2]
+; CHECK-NEXT: mov z18.h, z2.h[6]
+; CHECK-NEXT: mov z19.h, z2.h[4]
+; CHECK-NEXT: mov z20.h, z2.h[2]
+; CHECK-NEXT: mov z21.h, z0.h[6]
+; CHECK-NEXT: mov z22.h, z0.h[4]
+; CHECK-NEXT: zip1 z3.h, z4.h, z3.h
+; CHECK-NEXT: zip1 z4.h, z6.h, z5.h
+; CHECK-NEXT: zip1 z5.h, z16.h, z7.h
+; CHECK-NEXT: zip1 z7.h, z1.h, z17.h
+; CHECK-NEXT: zip1 z16.h, z19.h, z18.h
+; CHECK-NEXT: zip1 z18.h, z2.h, z20.h
+; CHECK-NEXT: mov z19.h, z0.h[2]
+; CHECK-NEXT: zip1 z17.h, z22.h, z21.h
+; CHECK-NEXT: mov z20.h, z6.h[7]
+; CHECK-NEXT: mov z21.h, z6.h[5]
+; CHECK-NEXT: mov z22.h, z6.h[3]
+; CHECK-NEXT: mov z6.h, z6.h[1]
+; CHECK-NEXT: mov z23.h, z1.h[7]
+; CHECK-NEXT: mov z24.h, z1.h[5]
+; CHECK-NEXT: mov z25.h, z1.h[3]
; CHECK-NEXT: mov z1.h, z1.h[1]
-; CHECK-NEXT: strh w8, [sp, #50]
-; CHECK-NEXT: fmov w8, s7
-; CHECK-NEXT: strh w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: mov z6.h, z0.h[5]
-; CHECK-NEXT: strh w8, [sp, #28]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z0.h[3]
-; CHECK-NEXT: mov z0.h, z0.h[1]
-; CHECK-NEXT: strh w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: strh w8, [sp, #24]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: strh w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: strh w8, [sp, #18]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: ldr q0, [sp, #48]
-; CHECK-NEXT: add z0.h, z3.h, z0.h
-; CHECK-NEXT: strh w8, [sp, #16]
-; CHECK-NEXT: ldr q1, [sp, #16]
-; CHECK-NEXT: add z1.h, z4.h, z1.h
-; CHECK-NEXT: stp q0, q1, [x0]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: mov z26.h, z2.h[7]
+; CHECK-NEXT: mov z27.h, z2.h[5]
+; CHECK-NEXT: mov z28.h, z2.h[3]
+; CHECK-NEXT: mov z2.h, z2.h[1]
+; CHECK-NEXT: mov z29.h, z0.h[7]
+; CHECK-NEXT: mov z30.h, z0.h[5]
+; CHECK-NEXT: mov z31.h, z0.h[3]
+; CHECK-NEXT: mov z8.h, z0.h[1]
+; CHECK-NEXT: zip1 z0.h, z0.h, z19.h
+; CHECK-NEXT: zip1 z19.h, z21.h, z20.h
+; CHECK-NEXT: zip1 z6.h, z6.h, z22.h
+; CHECK-NEXT: zip1 z20.h, z24.h, z23.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z25.h
+; CHECK-NEXT: zip1 z21.h, z27.h, z26.h
+; CHECK-NEXT: zip1 z2.h, z2.h, z28.h
+; CHECK-NEXT: zip1 z22.h, z30.h, z29.h
+; CHECK-NEXT: zip1 z23.h, z8.h, z31.h
+; CHECK-NEXT: zip1 z3.s, z4.s, z3.s
+; CHECK-NEXT: zip1 z4.s, z7.s, z5.s
+; CHECK-NEXT: zip1 z5.s, z18.s, z16.s
+; CHECK-NEXT: zip1 z6.s, z6.s, z19.s
+; CHECK-NEXT: zip1 z1.s, z1.s, z20.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z17.s
+; CHECK-NEXT: zip1 z2.s, z2.s, z21.s
+; CHECK-NEXT: zip1 z7.s, z23.s, z22.s
+; CHECK-NEXT: zip1 z3.d, z4.d, z3.d
+; CHECK-NEXT: zip1 z1.d, z1.d, z6.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z5.d
+; CHECK-NEXT: zip1 z2.d, z7.d, z2.d
+; CHECK-NEXT: add z1.h, z3.h, z1.h
+; CHECK-NEXT: add z0.h, z0.h, z2.h
+; CHECK-NEXT: stp q1, q0, [x0]
+; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: uzp_v16i16:
@@ -2116,32 +1904,28 @@ define void @uzp_v16i16(ptr %a, ptr %b) #0{
define void @uzp_v8f32(ptr %a, ptr %b) #0{
; CHECK-LABEL: uzp_v8f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #48
-; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: ldp q2, q0, [x0]
+; CHECK-NEXT: ldp q6, q0, [x0]
; CHECK-NEXT: adrp x8, .LCPI21_0
-; CHECK-NEXT: ldp q4, q1, [x1]
+; CHECK-NEXT: ldp q1, q2, [x1]
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: mov z3.s, z0.s[2]
-; CHECK-NEXT: mov z5.s, z1.s[2]
-; CHECK-NEXT: stp s0, s3, [sp, #24]
-; CHECK-NEXT: mov z3.s, z4.s[2]
-; CHECK-NEXT: stp s5, s2, [sp, #12]
-; CHECK-NEXT: mov z5.s, z0.s[3]
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: stp s3, s1, [sp, #4]
-; CHECK-NEXT: mov z1.s, z2.s[1]
-; CHECK-NEXT: str s5, [sp, #44]
+; CHECK-NEXT: mov z4.s, z0.s[3]
+; CHECK-NEXT: mov z5.s, z0.s[1]
+; CHECK-NEXT: mov z7.s, z2.s[2]
+; CHECK-NEXT: mov z16.s, z1.s[2]
+; CHECK-NEXT: zip1 z0.s, z0.s, z3.s
+; CHECK-NEXT: zip1 z3.s, z5.s, z4.s
+; CHECK-NEXT: mov z4.s, z6.s[1]
+; CHECK-NEXT: zip1 z2.s, z2.s, z7.s
; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI21_0]
-; CHECK-NEXT: str s0, [sp, #40]
-; CHECK-NEXT: ldp q3, q2, [sp]
-; CHECK-NEXT: tbl z0.s, { z4.s }, z5.s
-; CHECK-NEXT: str s1, [sp, #32]
-; CHECK-NEXT: ldr q1, [sp, #32]
-; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: zip1 z7.s, z0.s, z16.s
+; CHECK-NEXT: tbl z1.s, { z1.s }, z5.s
+; CHECK-NEXT: zip1 z0.d, z6.d, z0.d
+; CHECK-NEXT: zip1 z3.d, z4.d, z3.d
+; CHECK-NEXT: zip1 z2.d, z7.d, z2.d
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z3.s
-; CHECK-NEXT: stp q1, q0, [x0]
-; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: stp q0, q1, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: uzp_v8f32:
@@ -2231,60 +2015,38 @@ define void @uzp_v4i64(ptr %a, ptr %b) #0{
define void @uzp_v8i16(ptr %a, ptr %b) #0{
; CHECK-LABEL: uzp_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: ldr q0, [x0]
-; CHECK-NEXT: mov z2.h, z1.h[6]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov z4.h, z1.h[2]
-; CHECK-NEXT: mov z6.h, z0.h[4]
-; CHECK-NEXT: mov z3.h, z1.h[4]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov z5.h, z0.h[6]
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.h, z0.h[2]
-; CHECK-NEXT: strh w9, [sp]
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov z3.h, z1.h[7]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z1.h[5]
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: fmov w9, s5
-; CHECK-NEXT: mov z5.h, z1.h[3]
-; CHECK-NEXT: mov z1.h, z1.h[1]
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: fmov w8, s6
-; CHECK-NEXT: strh w9, [sp, #6]
-; CHECK-NEXT: fmov w9, s1
-; CHECK-NEXT: strh w8, [sp, #4]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov z2.h, z0.h[7]
-; CHECK-NEXT: strh w9, [sp, #24]
-; CHECK-NEXT: strh w8, [sp, #2]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w8, [sp, #30]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: mov z4.h, z0.h[5]
-; CHECK-NEXT: strh w8, [sp, #28]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov z5.h, z0.h[3]
-; CHECK-NEXT: mov z0.h, z0.h[1]
-; CHECK-NEXT: strh w8, [sp, #26]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: strh w8, [sp, #22]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: strh w8, [sp, #20]
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: strh w8, [sp, #18]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: strh w8, [sp, #16]
-; CHECK-NEXT: ldp q3, q0, [sp]
-; CHECK-NEXT: add z0.h, z3.h, z0.h
+; CHECK-NEXT: ldr q0, [x1]
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: mov z2.h, z0.h[6]
+; CHECK-NEXT: mov z3.h, z0.h[4]
+; CHECK-NEXT: mov z4.h, z0.h[2]
+; CHECK-NEXT: mov z5.h, z1.h[6]
+; CHECK-NEXT: mov z6.h, z1.h[4]
+; CHECK-NEXT: mov z7.h, z1.h[2]
+; CHECK-NEXT: mov z16.h, z0.h[7]
+; CHECK-NEXT: mov z17.h, z0.h[5]
+; CHECK-NEXT: mov z18.h, z0.h[3]
+; CHECK-NEXT: mov z19.h, z0.h[1]
+; CHECK-NEXT: mov z20.h, z1.h[7]
+; CHECK-NEXT: mov z21.h, z1.h[5]
+; CHECK-NEXT: mov z22.h, z1.h[3]
+; CHECK-NEXT: mov z23.h, z1.h[1]
+; CHECK-NEXT: zip1 z2.h, z3.h, z2.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z4.h
+; CHECK-NEXT: zip1 z3.h, z6.h, z5.h
+; CHECK-NEXT: zip1 z1.h, z1.h, z7.h
+; CHECK-NEXT: zip1 z4.h, z17.h, z16.h
+; CHECK-NEXT: zip1 z5.h, z19.h, z18.h
+; CHECK-NEXT: zip1 z6.h, z21.h, z20.h
+; CHECK-NEXT: zip1 z7.h, z23.h, z22.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z2.s
+; CHECK-NEXT: zip1 z1.s, z1.s, z3.s
+; CHECK-NEXT: zip1 z2.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z3.s, z7.s, z6.s
+; CHECK-NEXT: zip1 z0.d, z1.d, z0.d
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
+; CHECK-NEXT: add z0.h, z0.h, z1.h
; CHECK-NEXT: str q0, [x0]
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: uzp_v8i16:
@@ -2341,31 +2103,21 @@ define void @uzp_v8i16(ptr %a, ptr %b) #0{
define void @uzp_v8i32_undef(ptr %a) #0{
; CHECK-LABEL: uzp_v8i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: ldp q1, q0, [x0]
-; CHECK-NEXT: mov z2.s, z0.s[2]
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: mov z3.s, z1.s[2]
-; CHECK-NEXT: mov z4.s, z0.s[3]
-; CHECK-NEXT: mov z0.s, z0.s[1]
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: mov z2.s, z1.s[3]
-; CHECK-NEXT: stp w8, w9, [sp, #8]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov z1.s, z1.s[1]
-; CHECK-NEXT: stp w8, w9, [sp]
-; CHECK-NEXT: fmov w8, s4
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: stp w9, w8, [sp, #24]
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s1
-; CHECK-NEXT: stp w9, w8, [sp, #16]
-; CHECK-NEXT: ldp q0, q1, [sp]
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: mov z2.s, z1.s[2]
+; CHECK-NEXT: mov z3.s, z0.s[2]
+; CHECK-NEXT: mov z4.s, z1.s[3]
+; CHECK-NEXT: mov z5.s, z1.s[1]
+; CHECK-NEXT: mov z6.s, z0.s[3]
+; CHECK-NEXT: mov z7.s, z0.s[1]
+; CHECK-NEXT: zip1 z1.s, z1.s, z2.s
+; CHECK-NEXT: zip1 z0.s, z0.s, z3.s
+; CHECK-NEXT: zip1 z2.s, z5.s, z4.s
+; CHECK-NEXT: zip1 z3.s, z7.s, z6.s
+; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT: zip1 z1.d, z3.d, z2.d
; CHECK-NEXT: add z0.s, z0.s, z1.s
; CHECK-NEXT: stp q0, q0, [x0]
-; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: uzp_v8i32_undef:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
index 88c83a2..c942f1ec 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
@@ -10,22 +10,14 @@ target triple = "aarch64-unknown-linux-gnu"
define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) {
; CHECK-LABEL: reshuffle_v4i1_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
; CHECK-NEXT: mov z1.s, z0.s[3]
-; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: mov z2.s, z0.s[2]
; CHECK-NEXT: mov z3.s, z0.s[1]
-; CHECK-NEXT: strh w8, [sp, #8]
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #10]
-; CHECK-NEXT: ldr d0, [sp, #8]
-; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: zip1 z1.h, z2.h, z1.h
+; CHECK-NEXT: zip1 z0.h, z0.h, z3.h
+; CHECK-NEXT: zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%el0 = extractelement <vscale x 4 x i1> %a, i32 0
%el1 = extractelement <vscale x 4 x i1> %a, i32 1
diff --git a/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..5ff2d82
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple amdgcn | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: naked$local:
+; CHECK-NEXT: .type naked$local,@function
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, main@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, main@rel32@hi+12
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: normal$local:
+; CHECK-NEXT: .type normal$local,@function
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s16, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_or_saveexec_b64 s[18:19], -1
+; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[18:19]
+; CHECK-NEXT: s_waitcnt expcnt(0)
+; CHECK-NEXT: v_writelane_b32 v40, s16, 2
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v40, s30, 0
+; CHECK-NEXT: v_writelane_b32 v40, s31, 1
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, main@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, main@rel32@hi+12
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/ARM/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/ARM/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..2bdc7d3
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple arm | FileCheck %s -check-prefixes=CHECK-ALE
+; RUN: llc < %s -mtriple armeb | FileCheck %s -check-prefixes=CHECK-ABE
+; RUN: llc < %s -mtriple thumb | FileCheck %s -check-prefixes=CHECK-TLE
+; RUN: llc < %s -mtriple thumbeb | FileCheck %s -check-prefixes=CHECK-TBE
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-ALE-LABEL: naked:
+; CHECK-ALE: @ %bb.0:
+; CHECK-ALE-NEXT: bl main
+;
+; CHECK-ABE-LABEL: naked:
+; CHECK-ABE: @ %bb.0:
+; CHECK-ABE-NEXT: bl main
+;
+; CHECK-TLE-LABEL: naked:
+; CHECK-TLE: @ %bb.0:
+; CHECK-TLE-NEXT: bl main
+;
+; CHECK-TBE-LABEL: naked:
+; CHECK-TBE: @ %bb.0:
+; CHECK-TBE-NEXT: bl main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-ALE-LABEL: normal:
+; CHECK-ALE: @ %bb.0:
+; CHECK-ALE-NEXT: push {r11, lr}
+; CHECK-ALE-NEXT: mov r11, sp
+; CHECK-ALE-NEXT: bl main
+;
+; CHECK-ABE-LABEL: normal:
+; CHECK-ABE: @ %bb.0:
+; CHECK-ABE-NEXT: push {r11, lr}
+; CHECK-ABE-NEXT: mov r11, sp
+; CHECK-ABE-NEXT: bl main
+;
+; CHECK-TLE-LABEL: normal:
+; CHECK-TLE: @ %bb.0:
+; CHECK-TLE-NEXT: push {r7, lr}
+; CHECK-TLE-NEXT: add r7, sp, #0
+; CHECK-TLE-NEXT: bl main
+;
+; CHECK-TBE-LABEL: normal:
+; CHECK-TBE: @ %bb.0:
+; CHECK-TBE-NEXT: push {r7, lr}
+; CHECK-TBE-NEXT: add r7, sp, #0
+; CHECK-TBE-NEXT: bl main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/AVR/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/AVR/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..18ea609
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple avr | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/BPF/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/BPF/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..4e44362
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple bpfel | FileCheck %s -check-prefixes=CHECK-LE
+; RUN: llc < %s -mtriple bpfeb | FileCheck %s -check-prefixes=CHECK-BE
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LE-LABEL: naked:
+; CHECK-LE: .Lnaked$local:
+; CHECK-LE-NEXT: .type .Lnaked$local,@function
+; CHECK-LE-NEXT: .cfi_startproc
+; CHECK-LE-NEXT: # %bb.0:
+; CHECK-LE-NEXT: call main
+;
+; CHECK-BE-LABEL: naked:
+; CHECK-BE: .Lnaked$local:
+; CHECK-BE-NEXT: .type .Lnaked$local,@function
+; CHECK-BE-NEXT: .cfi_startproc
+; CHECK-BE-NEXT: # %bb.0:
+; CHECK-BE-NEXT: call main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LE-LABEL: normal:
+; CHECK-LE: .Lnormal$local:
+; CHECK-LE-NEXT: .type .Lnormal$local,@function
+; CHECK-LE-NEXT: .cfi_startproc
+; CHECK-LE-NEXT: # %bb.0:
+; CHECK-LE-NEXT: call main
+;
+; CHECK-BE-LABEL: normal:
+; CHECK-BE: .Lnormal$local:
+; CHECK-BE-NEXT: .type .Lnormal$local,@function
+; CHECK-BE-NEXT: .cfi_startproc
+; CHECK-BE-NEXT: # %bb.0:
+; CHECK-BE-NEXT: call main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/CSKY/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/CSKY/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..e897127
--- /dev/null
+++ b/llvm/test/CodeGen/CSKY/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple csky | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lrw a0, [.LCPI0_0]
+; CHECK-NEXT: jsr16 a0
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: .p2align 2, 0x0
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subi16 sp, sp, 8
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
+; CHECK-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset lr, -4
+; CHECK-NEXT: .cfi_offset l4, -8
+; CHECK-NEXT: mov16 l4, sp
+; CHECK-NEXT: .cfi_def_cfa_register l4
+; CHECK-NEXT: subi16 sp, sp, 4
+; CHECK-NEXT: lrw a0, [.LCPI1_0]
+; CHECK-NEXT: jsr16 a0
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: .p2align 2, 0x0
+; CHECK-NEXT: .LCPI1_0:
+; CHECK-NEXT: .long main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/Hexagon/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/Hexagon/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..c53f2d4
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple hexagon | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: {
+; CHECK-NEXT: call main
+; CHECK-NEXT: }
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: .cfi_def_cfa r30, 8
+; CHECK-NEXT: .cfi_offset r31, -4
+; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK-NEXT: {
+; CHECK-NEXT: call main
+; CHECK-NEXT: allocframe(r29,#0):raw
+; CHECK-NEXT: }
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/Lanai/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/Lanai/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..4e14876
--- /dev/null
+++ b/llvm/test/CodeGen/Lanai/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple lanai | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: .Lnaked$local:
+; CHECK-NEXT: .type .Lnaked$local,@function
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: add %pc, 0x10, %rca
+; CHECK-NEXT: st %rca, [--%sp]
+; CHECK-NEXT: bt main
+; CHECK-NEXT: nop
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: .Lnormal$local:
+; CHECK-NEXT: .type .Lnormal$local,@function
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: st %fp, [--%sp]
+; CHECK-NEXT: add %sp, 0x8, %fp
+; CHECK-NEXT: sub %sp, 0x8, %sp
+; CHECK-NEXT: add %pc, 0x10, %rca
+; CHECK-NEXT: st %rca, [--%sp]
+; CHECK-NEXT: bt main
+; CHECK-NEXT: nop
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/LoongArch/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/LoongArch/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..9bb4491
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple loongarch32 -mattr +d | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple loongarch64 -mattr +d | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: bl main
+;
+; CHECK-64-LABEL: naked:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: bl main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: addi.w $sp, $sp, -16
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; CHECK-32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; CHECK-32-NEXT: .cfi_offset 1, -4
+; CHECK-32-NEXT: .cfi_offset 22, -8
+; CHECK-32-NEXT: addi.w $fp, $sp, 16
+; CHECK-32-NEXT: .cfi_def_cfa 22, 0
+; CHECK-32-NEXT: bl main
+;
+; CHECK-64-LABEL: normal:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: addi.d $sp, $sp, -16
+; CHECK-64-NEXT: .cfi_def_cfa_offset 16
+; CHECK-64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; CHECK-64-NEXT: .cfi_offset 1, -8
+; CHECK-64-NEXT: .cfi_offset 22, -16
+; CHECK-64-NEXT: addi.d $fp, $sp, 16
+; CHECK-64-NEXT: .cfi_def_cfa 22, 0
+; CHECK-64-NEXT: bl main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/M68k/Atomics/non-ari.ll b/llvm/test/CodeGen/M68k/Atomics/non-ari.ll
new file mode 100644
index 0000000..1ae545e
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/Atomics/non-ari.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC
+
+define void @atomic_store_i8_element_monotonic(i8 %val, ptr %base, i32 %offset) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i8_element_monotonic:
+; NO-ATOMIC: ; %bb.0:
+; NO-ATOMIC-NEXT: move.b (7,%sp), %d0
+; NO-ATOMIC-NEXT: move.l (12,%sp), %d1
+; NO-ATOMIC-NEXT: move.l (8,%sp), %a0
+; NO-ATOMIC-NEXT: move.b %d0, (0,%a0,%d1)
+; NO-ATOMIC-NEXT: rts
+;
+; ATOMIC-LABEL: atomic_store_i8_element_monotonic:
+; ATOMIC: ; %bb.0:
+; ATOMIC-NEXT: move.b (7,%sp), %d0
+; ATOMIC-NEXT: move.l (12,%sp), %d1
+; ATOMIC-NEXT: move.l (8,%sp), %a0
+; ATOMIC-NEXT: move.b %d0, (0,%a0,%d1)
+; ATOMIC-NEXT: rts
+ %store_pointer = getelementptr i8, ptr %base, i32 %offset
+ store atomic i8 %val, ptr %store_pointer monotonic, align 1
+ ret void
+}
+
+define i8 @atomic_load_i8_element_monotonic(ptr %base, i32 %offset) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i8_element_monotonic:
+; NO-ATOMIC: ; %bb.0:
+; NO-ATOMIC-NEXT: move.l (8,%sp), %d0
+; NO-ATOMIC-NEXT: move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT: move.b (0,%a0,%d0), %d0
+; NO-ATOMIC-NEXT: rts
+;
+; ATOMIC-LABEL: atomic_load_i8_element_monotonic:
+; ATOMIC: ; %bb.0:
+; ATOMIC-NEXT: move.l (8,%sp), %d0
+; ATOMIC-NEXT: move.l (4,%sp), %a0
+; ATOMIC-NEXT: move.b (0,%a0,%d0), %d0
+; ATOMIC-NEXT: rts
+ %load_pointer = getelementptr i8, ptr %base, i32 %offset
+ %return_val = load atomic i8, ptr %load_pointer monotonic, align 1
+ ret i8 %return_val
+}
diff --git a/llvm/test/CodeGen/M68k/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/M68k/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..807c52c
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple m68k | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: jsr main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: link.w %a6, #0
+; CHECK-NEXT: .cfi_def_cfa_offset -8
+; CHECK-NEXT: .cfi_offset %a6, -8
+; CHECK-NEXT: .cfi_def_cfa_register %a6
+; CHECK-NEXT: jsr main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/MSP430/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/MSP430/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..2fdb010
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple msp430 | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: call #main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: push r4
+; CHECK-NEXT: .cfi_def_cfa_offset 4
+; CHECK-NEXT: .cfi_offset r4, -4
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: .cfi_def_cfa_register r4
+; CHECK-NEXT: call #main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/Mips/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/Mips/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..a3820da
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple mips | FileCheck %s -check-prefixes=CHECK-32-BE
+; RUN: llc < %s -mtriple mipsel | FileCheck %s -check-prefixes=CHECK-32-LE
+; RUN: llc < %s -mtriple mips64 | FileCheck %s -check-prefixes=CHECK-64-BE
+; RUN: llc < %s -mtriple mips64el | FileCheck %s -check-prefixes=CHECK-64-LE
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-BE-LABEL: naked:
+; CHECK-32-BE: # %bb.0:
+; CHECK-32-BE-NEXT: jal main
+; CHECK-32-BE-NEXT: nop
+;
+; CHECK-32-LE-LABEL: naked:
+; CHECK-32-LE: # %bb.0:
+; CHECK-32-LE-NEXT: jal main
+; CHECK-32-LE-NEXT: nop
+;
+; CHECK-64-BE-LABEL: naked:
+; CHECK-64-BE: # %bb.0:
+; CHECK-64-BE-NEXT: jal main
+; CHECK-64-BE-NEXT: nop
+;
+; CHECK-64-LE-LABEL: naked:
+; CHECK-64-LE: # %bb.0:
+; CHECK-64-LE-NEXT: jal main
+; CHECK-64-LE-NEXT: nop
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-BE-LABEL: normal:
+; CHECK-32-BE: # %bb.0:
+; CHECK-32-BE-NEXT: addiu $sp, $sp, -24
+; CHECK-32-BE-NEXT: .cfi_def_cfa_offset 24
+; CHECK-32-BE-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; CHECK-32-BE-NEXT: sw $fp, 16($sp) # 4-byte Folded Spill
+; CHECK-32-BE-NEXT: .cfi_offset 31, -4
+; CHECK-32-BE-NEXT: .cfi_offset 30, -8
+; CHECK-32-BE-NEXT: move $fp, $sp
+; CHECK-32-BE-NEXT: .cfi_def_cfa_register 30
+; CHECK-32-BE-NEXT: jal main
+; CHECK-32-BE-NEXT: nop
+;
+; CHECK-32-LE-LABEL: normal:
+; CHECK-32-LE: # %bb.0:
+; CHECK-32-LE-NEXT: addiu $sp, $sp, -24
+; CHECK-32-LE-NEXT: .cfi_def_cfa_offset 24
+; CHECK-32-LE-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; CHECK-32-LE-NEXT: sw $fp, 16($sp) # 4-byte Folded Spill
+; CHECK-32-LE-NEXT: .cfi_offset 31, -4
+; CHECK-32-LE-NEXT: .cfi_offset 30, -8
+; CHECK-32-LE-NEXT: move $fp, $sp
+; CHECK-32-LE-NEXT: .cfi_def_cfa_register 30
+; CHECK-32-LE-NEXT: jal main
+; CHECK-32-LE-NEXT: nop
+;
+; CHECK-64-BE-LABEL: normal:
+; CHECK-64-BE: # %bb.0:
+; CHECK-64-BE-NEXT: daddiu $sp, $sp, -16
+; CHECK-64-BE-NEXT: .cfi_def_cfa_offset 16
+; CHECK-64-BE-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; CHECK-64-BE-NEXT: sd $fp, 0($sp) # 8-byte Folded Spill
+; CHECK-64-BE-NEXT: .cfi_offset 31, -8
+; CHECK-64-BE-NEXT: .cfi_offset 30, -16
+; CHECK-64-BE-NEXT: move $fp, $sp
+; CHECK-64-BE-NEXT: .cfi_def_cfa_register 30
+; CHECK-64-BE-NEXT: jal main
+; CHECK-64-BE-NEXT: nop
+;
+; CHECK-64-LE-LABEL: normal:
+; CHECK-64-LE: # %bb.0:
+; CHECK-64-LE-NEXT: daddiu $sp, $sp, -16
+; CHECK-64-LE-NEXT: .cfi_def_cfa_offset 16
+; CHECK-64-LE-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; CHECK-64-LE-NEXT: sd $fp, 0($sp) # 8-byte Folded Spill
+; CHECK-64-LE-NEXT: .cfi_offset 31, -8
+; CHECK-64-LE-NEXT: .cfi_offset 30, -16
+; CHECK-64-LE-NEXT: move $fp, $sp
+; CHECK-64-LE-NEXT: .cfi_def_cfa_register 30
+; CHECK-64-LE-NEXT: jal main
+; CHECK-64-LE-NEXT: nop
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/NVPTX/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/NVPTX/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..a1f0577
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple nvptx | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple nvptx64 | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked(
+; CHECK-32: {
+; CHECK-32-EMPTY:
+; CHECK-32-EMPTY:
+; CHECK-32-NEXT: // %bb.0:
+; CHECK-32-NEXT: { // callseq 0, 0
+; CHECK-32-NEXT: call.uni
+; CHECK-32-NEXT: main,
+; CHECK-32-NEXT: (
+; CHECK-32-NEXT: );
+; CHECK-32-NEXT: } // callseq 0
+; CHECK-32-NEXT: // begin inline asm
+; CHECK-32-NEXT: exit;
+; CHECK-32-NEXT: // end inline asm
+;
+; CHECK-64-LABEL: naked(
+; CHECK-64: {
+; CHECK-64-EMPTY:
+; CHECK-64-EMPTY:
+; CHECK-64-NEXT: // %bb.0:
+; CHECK-64-NEXT: { // callseq 0, 0
+; CHECK-64-NEXT: call.uni
+; CHECK-64-NEXT: main,
+; CHECK-64-NEXT: (
+; CHECK-64-NEXT: );
+; CHECK-64-NEXT: } // callseq 0
+; CHECK-64-NEXT: // begin inline asm
+; CHECK-64-NEXT: exit;
+; CHECK-64-NEXT: // end inline asm
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal(
+; CHECK-32: {
+; CHECK-32-EMPTY:
+; CHECK-32-EMPTY:
+; CHECK-32-NEXT: // %bb.0:
+; CHECK-32-NEXT: { // callseq 1, 0
+; CHECK-32-NEXT: call.uni
+; CHECK-32-NEXT: main,
+; CHECK-32-NEXT: (
+; CHECK-32-NEXT: );
+; CHECK-32-NEXT: } // callseq 1
+; CHECK-32-NEXT: // begin inline asm
+; CHECK-32-NEXT: exit;
+; CHECK-32-NEXT: // end inline asm
+;
+; CHECK-64-LABEL: normal(
+; CHECK-64: {
+; CHECK-64-EMPTY:
+; CHECK-64-EMPTY:
+; CHECK-64-NEXT: // %bb.0:
+; CHECK-64-NEXT: { // callseq 1, 0
+; CHECK-64-NEXT: call.uni
+; CHECK-64-NEXT: main,
+; CHECK-64-NEXT: (
+; CHECK-64-NEXT: );
+; CHECK-64-NEXT: } // callseq 1
+; CHECK-64-NEXT: // begin inline asm
+; CHECK-64-NEXT: exit;
+; CHECK-64-NEXT: // end inline asm
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/PowerPC/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/PowerPC/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..59b1044
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple powerpc | FileCheck %s -check-prefixes=CHECK-32-BE
+; RUN: llc < %s -mtriple powerpcle | FileCheck %s -check-prefixes=CHECK-32-LE
+; RUN: llc < %s -mtriple powerpc64 | FileCheck %s -check-prefixes=CHECK-64-BE
+; RUN: llc < %s -mtriple powerpc64le | FileCheck %s -check-prefixes=CHECK-64-LE
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-BE-LABEL: naked:
+; CHECK-32-BE: # %bb.0:
+; CHECK-32-BE-NEXT: bl main
+;
+; CHECK-32-LE-LABEL: naked:
+; CHECK-32-LE: # %bb.0:
+; CHECK-32-LE-NEXT: bl main
+;
+; CHECK-64-BE-LABEL: naked:
+; CHECK-64-BE: # %bb.0:
+; CHECK-64-BE-NEXT: bl main
+; CHECK-64-BE-NEXT: nop
+;
+; CHECK-64-LE-LABEL: naked:
+; CHECK-64-LE: # %bb.0:
+; CHECK-64-LE-NEXT: bl main
+; CHECK-64-LE-NEXT: nop
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-BE-LABEL: normal:
+; CHECK-32-BE: # %bb.0:
+; CHECK-32-BE-NEXT: mflr 0
+; CHECK-32-BE-NEXT: stwu 1, -16(1)
+; CHECK-32-BE-NEXT: stw 31, 12(1)
+; CHECK-32-BE-NEXT: stw 0, 20(1)
+; CHECK-32-BE-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-BE-NEXT: .cfi_offset r31, -4
+; CHECK-32-BE-NEXT: .cfi_offset lr, 4
+; CHECK-32-BE-NEXT: mr 31, 1
+; CHECK-32-BE-NEXT: .cfi_def_cfa_register r31
+; CHECK-32-BE-NEXT: bl main
+;
+; CHECK-32-LE-LABEL: normal:
+; CHECK-32-LE: # %bb.0:
+; CHECK-32-LE-NEXT: mflr 0
+; CHECK-32-LE-NEXT: stwu 1, -16(1)
+; CHECK-32-LE-NEXT: stw 31, 12(1)
+; CHECK-32-LE-NEXT: stw 0, 20(1)
+; CHECK-32-LE-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-LE-NEXT: .cfi_offset r31, -4
+; CHECK-32-LE-NEXT: .cfi_offset lr, 4
+; CHECK-32-LE-NEXT: mr 31, 1
+; CHECK-32-LE-NEXT: .cfi_def_cfa_register r31
+; CHECK-32-LE-NEXT: bl main
+;
+; CHECK-64-BE-LABEL: normal:
+; CHECK-64-BE: # %bb.0:
+; CHECK-64-BE-NEXT: mflr 0
+; CHECK-64-BE-NEXT: std 31, -8(1)
+; CHECK-64-BE-NEXT: stdu 1, -128(1)
+; CHECK-64-BE-NEXT: std 0, 144(1)
+; CHECK-64-BE-NEXT: .cfi_def_cfa_offset 128
+; CHECK-64-BE-NEXT: .cfi_offset r31, -8
+; CHECK-64-BE-NEXT: .cfi_offset lr, 16
+; CHECK-64-BE-NEXT: mr 31, 1
+; CHECK-64-BE-NEXT: .cfi_def_cfa_register r31
+; CHECK-64-BE-NEXT: bl main
+; CHECK-64-BE-NEXT: nop
+;
+; CHECK-64-LE-LABEL: normal:
+; CHECK-64-LE: # %bb.0:
+; CHECK-64-LE-NEXT: mflr 0
+; CHECK-64-LE-NEXT: std 31, -8(1)
+; CHECK-64-LE-NEXT: stdu 1, -48(1)
+; CHECK-64-LE-NEXT: std 0, 64(1)
+; CHECK-64-LE-NEXT: .cfi_def_cfa_offset 48
+; CHECK-64-LE-NEXT: .cfi_offset r31, -8
+; CHECK-64-LE-NEXT: .cfi_offset lr, 16
+; CHECK-64-LE-NEXT: mr 31, 1
+; CHECK-64-LE-NEXT: .cfi_def_cfa_register r31
+; CHECK-64-LE-NEXT: bl main
+; CHECK-64-LE-NEXT: nop
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/PowerPC/stack-guard-global.ll b/llvm/test/CodeGen/PowerPC/stack-guard-global.ll
new file mode 100644
index 0000000..022a62a4
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/stack-guard-global.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc64 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=BE64
+; RUN: llc -mtriple=powerpc64le -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=LE64
+; RUN: llc -mtriple=ppc32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=LE32
+
+define void @foo(i64 %t) sspstrong nounwind {
+; BE64-LABEL: foo:
+; BE64: # %bb.0:
+; BE64-NEXT: mflr 0
+; BE64-NEXT: std 31, -8(1)
+; BE64-NEXT: stdu 1, -144(1)
+; BE64-NEXT: mr 31, 1
+; BE64-NEXT: std 0, 160(1)
+; BE64-NEXT: std 30, 128(31) # 8-byte Folded Spill
+; BE64-NEXT: addis 30, 2, __stack_chk_guard@toc@ha
+; BE64-NEXT: sldi 3, 3, 2
+; BE64-NEXT: ld 4, __stack_chk_guard@toc@l(30)
+; BE64-NEXT: addi 3, 3, 15
+; BE64-NEXT: rldicr 3, 3, 0, 59
+; BE64-NEXT: neg 3, 3
+; BE64-NEXT: std 4, 120(31)
+; BE64-NEXT: addi 4, 31, 144
+; BE64-NEXT: stdux 4, 1, 3
+; BE64-NEXT: addi 3, 1, 112
+; BE64-NEXT: bl baz
+; BE64-NEXT: nop
+; BE64-NEXT: ld 3, __stack_chk_guard@toc@l(30)
+; BE64-NEXT: ld 4, 120(31)
+; BE64-NEXT: cmpld 3, 4
+; BE64-NEXT: bne 0, .LBB0_2
+; BE64-NEXT: # %bb.1:
+; BE64-NEXT: ld 30, 128(31) # 8-byte Folded Reload
+; BE64-NEXT: ld 1, 0(1)
+; BE64-NEXT: ld 0, 16(1)
+; BE64-NEXT: ld 31, -8(1)
+; BE64-NEXT: mtlr 0
+; BE64-NEXT: blr
+; BE64-NEXT: .LBB0_2:
+; BE64-NEXT: bl __stack_chk_fail
+; BE64-NEXT: nop
+;
+; LE64-LABEL: foo:
+; LE64: # %bb.0:
+; LE64-NEXT: mflr 0
+; LE64-NEXT: std 31, -8(1)
+; LE64-NEXT: stdu 1, -64(1)
+; LE64-NEXT: mr 31, 1
+; LE64-NEXT: sldi 3, 3, 2
+; LE64-NEXT: std 0, 80(1)
+; LE64-NEXT: std 30, 48(31) # 8-byte Folded Spill
+; LE64-NEXT: addis 30, 2, __stack_chk_guard@toc@ha
+; LE64-NEXT: addi 3, 3, 15
+; LE64-NEXT: ld 4, __stack_chk_guard@toc@l(30)
+; LE64-NEXT: rldicr 3, 3, 0, 59
+; LE64-NEXT: neg 3, 3
+; LE64-NEXT: std 4, 40(31)
+; LE64-NEXT: addi 4, 31, 64
+; LE64-NEXT: stdux 4, 1, 3
+; LE64-NEXT: addi 3, 1, 32
+; LE64-NEXT: bl baz
+; LE64-NEXT: nop
+; LE64-NEXT: ld 3, __stack_chk_guard@toc@l(30)
+; LE64-NEXT: ld 4, 40(31)
+; LE64-NEXT: cmpld 3, 4
+; LE64-NEXT: bne 0, .LBB0_2
+; LE64-NEXT: # %bb.1:
+; LE64-NEXT: ld 30, 48(31) # 8-byte Folded Reload
+; LE64-NEXT: ld 1, 0(1)
+; LE64-NEXT: ld 0, 16(1)
+; LE64-NEXT: ld 31, -8(1)
+; LE64-NEXT: mtlr 0
+; LE64-NEXT: blr
+; LE64-NEXT: .LBB0_2:
+; LE64-NEXT: bl __stack_chk_fail
+; LE64-NEXT: nop
+;
+; LE32-LABEL: foo:
+; LE32: # %bb.0:
+; LE32-NEXT: mflr 0
+; LE32-NEXT: stwu 1, -32(1)
+; LE32-NEXT: stw 31, 28(1)
+; LE32-NEXT: mr 31, 1
+; LE32-NEXT: stw 0, 36(1)
+; LE32-NEXT: slwi 4, 4, 2
+; LE32-NEXT: stw 30, 24(31) # 4-byte Folded Spill
+; LE32-NEXT: lis 30, __stack_chk_guard@ha
+; LE32-NEXT: lwz 3, __stack_chk_guard@l(30)
+; LE32-NEXT: addi 4, 4, 15
+; LE32-NEXT: rlwinm 4, 4, 0, 0, 27
+; LE32-NEXT: neg 4, 4
+; LE32-NEXT: stw 3, 20(31)
+; LE32-NEXT: addi 3, 31, 32
+; LE32-NEXT: stwux 3, 1, 4
+; LE32-NEXT: addi 3, 1, 16
+; LE32-NEXT: bl baz
+; LE32-NEXT: lwz 3, __stack_chk_guard@l(30)
+; LE32-NEXT: lwz 4, 20(31)
+; LE32-NEXT: cmplw 3, 4
+; LE32-NEXT: bne 0, .LBB0_2
+; LE32-NEXT: # %bb.1:
+; LE32-NEXT: lwz 30, 24(31) # 4-byte Folded Reload
+; LE32-NEXT: lwz 31, 0(1)
+; LE32-NEXT: lwz 0, -4(31)
+; LE32-NEXT: mr 1, 31
+; LE32-NEXT: mr 31, 0
+; LE32-NEXT: lwz 0, 4(1)
+; LE32-NEXT: mtlr 0
+; LE32-NEXT: blr
+; LE32-NEXT: .LBB0_2:
+; LE32-NEXT: bl __stack_chk_fail
+ %vla = alloca i32, i64 %t, align 4
+ call void @baz(ptr %vla)
+ ret void
+}
+
+declare void @baz(ptr)
+
+!llvm.module.flags = !{!1}
+!1 = !{i32 2, !"stack-protector-guard", !"global"}
diff --git a/llvm/test/CodeGen/PowerPC/stack-guard-tls.ll b/llvm/test/CodeGen/PowerPC/stack-guard-tls.ll
new file mode 100644
index 0000000..de0becc
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/stack-guard-tls.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc64 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=BE64
+; RUN: llc -mtriple=powerpc64le -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=LE64
+; RUN: llc -mtriple=ppc32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=LE32
+
+define void @foo(i64 %t) sspstrong nounwind {
+; BE64-LABEL: foo:
+; BE64: # %bb.0:
+; BE64-NEXT: mflr 0
+; BE64-NEXT: std 31, -8(1)
+; BE64-NEXT: stdu 1, -144(1)
+; BE64-NEXT: ld 4, 500(13)
+; BE64-NEXT: sldi 3, 3, 2
+; BE64-NEXT: mr 31, 1
+; BE64-NEXT: addi 3, 3, 15
+; BE64-NEXT: rldicr 3, 3, 0, 59
+; BE64-NEXT: std 0, 160(1)
+; BE64-NEXT: neg 3, 3
+; BE64-NEXT: std 4, 128(31)
+; BE64-NEXT: addi 4, 31, 144
+; BE64-NEXT: stdux 4, 1, 3
+; BE64-NEXT: addi 3, 1, 112
+; BE64-NEXT: bl baz
+; BE64-NEXT: nop
+; BE64-NEXT: ld 3, 128(31)
+; BE64-NEXT: ld 4, 500(13)
+; BE64-NEXT: cmpld 4, 3
+; BE64-NEXT: bne 0, .LBB0_2
+; BE64-NEXT: # %bb.1:
+; BE64-NEXT: ld 1, 0(1)
+; BE64-NEXT: ld 0, 16(1)
+; BE64-NEXT: ld 31, -8(1)
+; BE64-NEXT: mtlr 0
+; BE64-NEXT: blr
+; BE64-NEXT: .LBB0_2:
+; BE64-NEXT: bl __stack_chk_fail
+; BE64-NEXT: nop
+;
+; LE64-LABEL: foo:
+; LE64: # %bb.0:
+; LE64-NEXT: mflr 0
+; LE64-NEXT: std 31, -8(1)
+; LE64-NEXT: stdu 1, -64(1)
+; LE64-NEXT: sldi 3, 3, 2
+; LE64-NEXT: ld 4, 500(13)
+; LE64-NEXT: std 0, 80(1)
+; LE64-NEXT: addi 3, 3, 15
+; LE64-NEXT: mr 31, 1
+; LE64-NEXT: std 4, 48(31)
+; LE64-NEXT: addi 4, 31, 64
+; LE64-NEXT: rldicr 3, 3, 0, 59
+; LE64-NEXT: neg 3, 3
+; LE64-NEXT: stdux 4, 1, 3
+; LE64-NEXT: addi 3, 1, 32
+; LE64-NEXT: bl baz
+; LE64-NEXT: nop
+; LE64-NEXT: ld 3, 48(31)
+; LE64-NEXT: ld 4, 500(13)
+; LE64-NEXT: cmpld 4, 3
+; LE64-NEXT: bne 0, .LBB0_2
+; LE64-NEXT: # %bb.1:
+; LE64-NEXT: ld 1, 0(1)
+; LE64-NEXT: ld 0, 16(1)
+; LE64-NEXT: ld 31, -8(1)
+; LE64-NEXT: mtlr 0
+; LE64-NEXT: blr
+; LE64-NEXT: .LBB0_2:
+; LE64-NEXT: bl __stack_chk_fail
+; LE64-NEXT: nop
+;
+; LE32-LABEL: foo:
+; LE32: # %bb.0:
+; LE32-NEXT: mflr 0
+; LE32-NEXT: stwu 1, -32(1)
+; LE32-NEXT: lwz 3, 500(2)
+; LE32-NEXT: slwi 4, 4, 2
+; LE32-NEXT: addi 4, 4, 15
+; LE32-NEXT: stw 31, 28(1)
+; LE32-NEXT: mr 31, 1
+; LE32-NEXT: rlwinm 4, 4, 0, 0, 27
+; LE32-NEXT: stw 0, 36(1)
+; LE32-NEXT: neg 4, 4
+; LE32-NEXT: stw 3, 24(31)
+; LE32-NEXT: addi 3, 31, 32
+; LE32-NEXT: stwux 3, 1, 4
+; LE32-NEXT: addi 3, 1, 16
+; LE32-NEXT: bl baz
+; LE32-NEXT: lwz 3, 24(31)
+; LE32-NEXT: lwz 4, 500(2)
+; LE32-NEXT: cmplw 4, 3
+; LE32-NEXT: bne 0, .LBB0_2
+; LE32-NEXT: # %bb.1:
+; LE32-NEXT: lwz 31, 0(1)
+; LE32-NEXT: lwz 0, -4(31)
+; LE32-NEXT: mr 1, 31
+; LE32-NEXT: mr 31, 0
+; LE32-NEXT: lwz 0, 4(1)
+; LE32-NEXT: mtlr 0
+; LE32-NEXT: blr
+; LE32-NEXT: .LBB0_2:
+; LE32-NEXT: bl __stack_chk_fail
+ %vla = alloca i32, i64 %t, align 4
+ call void @baz(ptr %vla)
+ ret void
+}
+
+declare void @baz(ptr)
+
+!llvm.module.flags = !{!1, !2}
+!1 = !{i32 2, !"stack-protector-guard", !"tls"}
+!2 = !{i32 2, !"stack-protector-guard-offset", i32 500}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index c480ba8..08e9173 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -39,6 +39,39 @@ define double @constraint_f_double(double %a) nounwind {
ret double %2
}
+define double @constraint_cf_double(double %a) nounwind {
+; RV32F-LABEL: constraint_cf_double:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw a0, 8(sp)
+; RV32F-NEXT: sw a1, 12(sp)
+; RV32F-NEXT: fld fa5, 8(sp)
+; RV32F-NEXT: lui a0, %hi(gd)
+; RV32F-NEXT: fld fa4, %lo(gd)(a0)
+; RV32F-NEXT: #APP
+; RV32F-NEXT: fadd.d fa5, fa5, fa4
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fsd fa5, 8(sp)
+; RV32F-NEXT: lw a0, 8(sp)
+; RV32F-NEXT: lw a1, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_cf_double:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gd)
+; RV64F-NEXT: fld fa5, %lo(gd)(a1)
+; RV64F-NEXT: fmv.d.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: fadd.d fa5, fa4, fa5
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.d a0, fa5
+; RV64F-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "=^cf,^cf,^cf"(double %a, double %1)
+ ret double %2
+}
+
define double @constraint_f_double_abi_name(double %a) nounwind {
; RV32F-LABEL: constraint_f_double_abi_name:
; RV32F: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll
new file mode 100644
index 0000000..581cf8e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV64F %s
+
+;; `.insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.d`
+
+@gd = external global double
+
+define double @constraint_f_double(double %a) nounwind {
+; RV32F-LABEL: constraint_f_double:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw a0, 8(sp)
+; RV32F-NEXT: sw a1, 12(sp)
+; RV32F-NEXT: fld fa5, 8(sp)
+; RV32F-NEXT: lui a0, %hi(gd)
+; RV32F-NEXT: fld fa4, %lo(gd)(a0)
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x02000053 | (15 << 7) | (15 << 15) | (14 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fsd fa5, 8(sp)
+; RV32F-NEXT: lw a0, 8(sp)
+; RV32F-NEXT: lw a1, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_f_double:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gd)
+; RV64F-NEXT: fld fa5, %lo(gd)(a1)
+; RV64F-NEXT: fmv.d.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x02000053 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.d a0, fa5
+; RV64F-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(double %a, double %1)
+ ret double %2
+}
+
+define double @constraint_cf_double(double %a) nounwind {
+; RV32F-LABEL: constraint_cf_double:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw a0, 8(sp)
+; RV32F-NEXT: sw a1, 12(sp)
+; RV32F-NEXT: fld fa5, 8(sp)
+; RV32F-NEXT: lui a0, %hi(gd)
+; RV32F-NEXT: fld fa4, %lo(gd)(a0)
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x02000053 | (15 << 7) | (15 << 15) | (14 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fsd fa5, 8(sp)
+; RV32F-NEXT: lw a0, 8(sp)
+; RV32F-NEXT: lw a1, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_cf_double:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gd)
+; RV64F-NEXT: fld fa5, %lo(gd)(a1)
+; RV64F-NEXT: fmv.d.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x02000053 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.d a0, fa5
+; RV64F-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(double %a, double %1)
+ ret double %2
+}
+
+define double @constraint_f_double_abi_name(double %a) nounwind {
+; RV32F-LABEL: constraint_f_double_abi_name:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw a0, 8(sp)
+; RV32F-NEXT: sw a1, 12(sp)
+; RV32F-NEXT: fld fa1, 8(sp)
+; RV32F-NEXT: lui a0, %hi(gd)
+; RV32F-NEXT: fld fs0, %lo(gd)(a0)
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x02000053 | (0 << 7) | (11 << 15) | (8 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fsd ft0, 8(sp)
+; RV32F-NEXT: lw a0, 8(sp)
+; RV32F-NEXT: lw a1, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_f_double_abi_name:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gd)
+; RV64F-NEXT: fld fs0, %lo(gd)(a1)
+; RV64F-NEXT: fmv.d.x fa1, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x02000053 | (0 << 7) | (11 << 15) | (8 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.d a0, ft0
+; RV64F-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa1},{fs0}"(double %a, double %1)
+ ret double %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
index 91922cd..a91c654 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
@@ -1,5 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; NOTE: Assertions gave been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32F %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs < %s \
@@ -38,6 +37,33 @@ define float @constraint_f_float(float %a) nounwind {
ret float %2
}
+define float @constraint_cf_float(float %a) nounwind {
+; RV32F-LABEL: constraint_cf_float:
+; RV32F: # %bb.0:
+; RV32F-NEXT: lui a1, %hi(gf)
+; RV32F-NEXT: flw fa5, %lo(gf)(a1)
+; RV32F-NEXT: fmv.w.x fa4, a0
+; RV32F-NEXT: #APP
+; RV32F-NEXT: fadd.s fa5, fa4, fa5
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fmv.x.w a0, fa5
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_cf_float:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gf)
+; RV64F-NEXT: flw fa5, %lo(gf)(a1)
+; RV64F-NEXT: fmv.w.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: fadd.s fa5, fa4, fa5
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.w a0, fa5
+; RV64F-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "=^cf,cf,cf"(float %a, float %1)
+ ret float %2
+}
+
define float @constraint_f_float_abi_name(float %a) nounwind {
; RV32F-LABEL: constraint_f_float_abi_name:
; RV32F: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll
new file mode 100644
index 0000000..a0de5c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV64F %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN: | FileCheck -check-prefix=RV64F %s
+
+;; `.insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.s`
+
+@gf = external global float
+
+define float @constraint_f_modifier_N_float(float %a) nounwind {
+; RV32F-LABEL: constraint_f_modifier_N_float:
+; RV32F: # %bb.0:
+; RV32F-NEXT: lui a1, %hi(gf)
+; RV32F-NEXT: flw fa5, %lo(gf)(a1)
+; RV32F-NEXT: fmv.w.x fa4, a0
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fmv.x.w a0, fa5
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_f_modifier_N_float:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gf)
+; RV64F-NEXT: flw fa5, %lo(gf)(a1)
+; RV64F-NEXT: fmv.w.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.w a0, fa5
+; RV64F-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(float %a, float %1)
+ ret float %2
+}
+
+
+define float @constraint_cf_modifier_N_float(float %a) nounwind {
+; RV32F-LABEL: constraint_cf_modifier_N_float:
+; RV32F: # %bb.0:
+; RV32F-NEXT: lui a1, %hi(gf)
+; RV32F-NEXT: flw fa5, %lo(gf)(a1)
+; RV32F-NEXT: fmv.w.x fa4, a0
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fmv.x.w a0, fa5
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: constraint_cf_modifier_N_float:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gf)
+; RV64F-NEXT: flw fa5, %lo(gf)(a1)
+; RV64F-NEXT: fmv.w.x fa4, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.w a0, fa5
+; RV64F-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(float %a, float %1)
+ ret float %2
+}
+
+define float @modifier_N_float_abi_name(float %a) nounwind {
+; RV32F-LABEL: modifier_N_float_abi_name:
+; RV32F: # %bb.0:
+; RV32F-NEXT: lui a1, %hi(gf)
+; RV32F-NEXT: flw fs0, %lo(gf)(a1)
+; RV32F-NEXT: fmv.w.x fa0, a0
+; RV32F-NEXT: #APP
+; RV32F-NEXT: .insn 0x4, 0x53 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32F-NEXT: #NO_APP
+; RV32F-NEXT: fmv.x.w a0, ft0
+; RV32F-NEXT: ret
+;
+; RV64F-LABEL: modifier_N_float_abi_name:
+; RV64F: # %bb.0:
+; RV64F-NEXT: lui a1, %hi(gf)
+; RV64F-NEXT: flw fs0, %lo(gf)(a1)
+; RV64F-NEXT: fmv.w.x fa0, a0
+; RV64F-NEXT: #APP
+; RV64F-NEXT: .insn 0x4, 0x53 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64F-NEXT: #NO_APP
+; RV64F-NEXT: fmv.x.w a0, ft0
+; RV64F-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa0},{fs0}"(float %a, float %1)
+ ret float %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll b/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
index 14b7cb8..deffa17 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
@@ -31,6 +31,14 @@ define void @constraint_f() nounwind {
ret void
}
+define void @constraint_cf() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cf'
+ tail call void asm "fadd.s fa0, fa0, $0", "^cf"(float 0.0)
+; CHECK: error: couldn't allocate input reg for constraint 'cf'
+ tail call void asm "fadd.d fa0, fa0, $0", "^cf"(double 0.0)
+ ret void
+}
+
define void @constraint_r_fixed_vec() nounwind {
; CHECK: error: couldn't allocate input reg for constraint 'r'
tail call void asm "add a0, a0, $0", "r"(<4 x i32> zeroinitializer)
@@ -42,3 +50,15 @@ define void @constraint_r_scalable_vec() nounwind {
tail call void asm "add a0, a0, $0", "r"(<vscale x 4 x i32> zeroinitializer)
ret void
}
+
+define void @constraint_cr_fixed_vec() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cr'
+ tail call void asm "add a0, a0, $0", "^cr"(<4 x i32> zeroinitializer)
+ ret void
+}
+
+define void @constraint_cr_scalable_vec() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cr'
+ tail call void asm "add a0, a0, $0", "^cr"(<vscale x 4 x i32> zeroinitializer)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
new file mode 100644
index 0000000..15729ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32FINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64FINX %s
+
+@gd = external global double
+
+define double @constraint_r_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_r_double:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a2, %hi(gd)
+; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT: lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.d a0, a0, a2
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_r_double:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gd)
+; RV64FINX-NEXT: ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.d a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "=r,r,r"(double %a, double %1)
+ ret double %2
+}
+
+define double @constraint_cr_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_cr_double:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a2, %hi(gd)
+; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT: lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.d a0, a0, a2
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_cr_double:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gd)
+; RV64FINX-NEXT: ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.d a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "=^cr,^cr,^cr"(double %a, double %1)
+ ret double %2
+}
+
+define double @constraint_double_abi_name(double %a) nounwind {
+; RV32FINX-LABEL: constraint_double_abi_name:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: addi sp, sp, -16
+; RV32FINX-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT: lui a2, %hi(gd)
+; RV32FINX-NEXT: lw s0, %lo(gd)(a2)
+; RV32FINX-NEXT: lw s1, %lo(gd+4)(a2)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.d t1, a0, s0
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: mv a0, t1
+; RV32FINX-NEXT: mv a1, t2
+; RV32FINX-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT: addi sp, sp, 16
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_double_abi_name:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: addi sp, sp, -16
+; RV64FINX-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
+; RV64FINX-NEXT: lui a1, %hi(gd)
+; RV64FINX-NEXT: ld s0, %lo(gd)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.d t1, a0, s0
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: mv a0, t1
+; RV64FINX-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; RV64FINX-NEXT: addi sp, sp, 16
+; RV64FINX-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "={t1},{a0},{s0}"(double %a, double %1)
+ ret double %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
index 8caf595..83145ba 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
@@ -51,6 +51,47 @@ define half @constraint_f_half(half %a) nounwind {
ret half %2
}
+define half @constraint_cf_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_cf_half:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(gh)
+; RV32ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT: #APP
+; RV32ZFH-NEXT: fadd.h fa0, fa0, fa5
+; RV32ZFH-NEXT: #NO_APP
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: constraint_cf_half:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: lui a0, %hi(gh)
+; RV64ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT: #APP
+; RV64ZFH-NEXT: fadd.h fa0, fa0, fa5
+; RV64ZFH-NEXT: #NO_APP
+; RV64ZFH-NEXT: ret
+;
+; RV32DZFH-LABEL: constraint_cf_half:
+; RV32DZFH: # %bb.0:
+; RV32DZFH-NEXT: lui a0, %hi(gh)
+; RV32DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT: #APP
+; RV32DZFH-NEXT: fadd.h fa0, fa0, fa5
+; RV32DZFH-NEXT: #NO_APP
+; RV32DZFH-NEXT: ret
+;
+; RV64DZFH-LABEL: constraint_cf_half:
+; RV64DZFH: # %bb.0:
+; RV64DZFH-NEXT: lui a0, %hi(gh)
+; RV64DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT: #APP
+; RV64DZFH-NEXT: fadd.h fa0, fa0, fa5
+; RV64DZFH-NEXT: #NO_APP
+; RV64DZFH-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.h $0, $1, $2", "=^cf,^cf,^cf"(half %a, half %1)
+ ret half %2
+}
+
define half @constraint_f_half_abi_name(half %a) nounwind {
; RV32ZFH-LABEL: constraint_f_half_abi_name:
; RV32ZFH: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll
new file mode 100644
index 0000000..d1eb2a2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll
@@ -0,0 +1,157 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=zfh -verify-machineinstrs -no-integrated-as < %s \
+; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32ZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=zfh -verify-machineinstrs -no-integrated-as < %s \
+; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64ZFH %s
+; RUN: llc -mtriple=riscv32 -mattr=zfh,+d -verify-machineinstrs -no-integrated-as < %s \
+; RUN: -target-abi=ilp32d | FileCheck -check-prefix=RV32DZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=zfh,+d -verify-machineinstrs -no-integrated-as < %s \
+; RUN: -target-abi=lp64d | FileCheck -check-prefix=RV64DZFH %s
+
+;; `.insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.h`
+
+@gh = external global half
+
+define half @constraint_f_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_f_half:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(gh)
+; RV32ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT: #APP
+; RV32ZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32ZFH-NEXT: #NO_APP
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: constraint_f_half:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: lui a0, %hi(gh)
+; RV64ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT: #APP
+; RV64ZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64ZFH-NEXT: #NO_APP
+; RV64ZFH-NEXT: ret
+;
+; RV32DZFH-LABEL: constraint_f_half:
+; RV32DZFH: # %bb.0:
+; RV32DZFH-NEXT: lui a0, %hi(gh)
+; RV32DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT: #APP
+; RV32DZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32DZFH-NEXT: #NO_APP
+; RV32DZFH-NEXT: ret
+;
+; RV64DZFH-LABEL: constraint_f_half:
+; RV64DZFH: # %bb.0:
+; RV64DZFH-NEXT: lui a0, %hi(gh)
+; RV64DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT: #APP
+; RV64DZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64DZFH-NEXT: #NO_APP
+; RV64DZFH-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(half %a, half %1)
+ ret half %2
+}
+
+define half @constraint_cf_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_cf_half:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(gh)
+; RV32ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT: #APP
+; RV32ZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32ZFH-NEXT: #NO_APP
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: constraint_cf_half:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: lui a0, %hi(gh)
+; RV64ZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT: #APP
+; RV64ZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64ZFH-NEXT: #NO_APP
+; RV64ZFH-NEXT: ret
+;
+; RV32DZFH-LABEL: constraint_cf_half:
+; RV32DZFH: # %bb.0:
+; RV32DZFH-NEXT: lui a0, %hi(gh)
+; RV32DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT: #APP
+; RV32DZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32DZFH-NEXT: #NO_APP
+; RV32DZFH-NEXT: ret
+;
+; RV64DZFH-LABEL: constraint_cf_half:
+; RV64DZFH: # %bb.0:
+; RV64DZFH-NEXT: lui a0, %hi(gh)
+; RV64DZFH-NEXT: flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT: #APP
+; RV64DZFH-NEXT: .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64DZFH-NEXT: #NO_APP
+; RV64DZFH-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(half %a, half %1)
+ ret half %2
+}
+
+define half @constraint_f_half_abi_name(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_f_half_abi_name:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: addi sp, sp, -16
+; RV32ZFH-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV32ZFH-NEXT: lui a0, %hi(gh)
+; RV32ZFH-NEXT: flh fs0, %lo(gh)(a0)
+; RV32ZFH-NEXT: #APP
+; RV32ZFH-NEXT: .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32ZFH-NEXT: #NO_APP
+; RV32ZFH-NEXT: fmv.h fa0, ft0
+; RV32ZFH-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
+; RV32ZFH-NEXT: addi sp, sp, 16
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: constraint_f_half_abi_name:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: addi sp, sp, -16
+; RV64ZFH-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64ZFH-NEXT: lui a0, %hi(gh)
+; RV64ZFH-NEXT: flh fs0, %lo(gh)(a0)
+; RV64ZFH-NEXT: #APP
+; RV64ZFH-NEXT: .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64ZFH-NEXT: #NO_APP
+; RV64ZFH-NEXT: fmv.h fa0, ft0
+; RV64ZFH-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
+; RV64ZFH-NEXT: addi sp, sp, 16
+; RV64ZFH-NEXT: ret
+;
+; RV32DZFH-LABEL: constraint_f_half_abi_name:
+; RV32DZFH: # %bb.0:
+; RV32DZFH-NEXT: addi sp, sp, -16
+; RV32DZFH-NEXT: fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV32DZFH-NEXT: lui a0, %hi(gh)
+; RV32DZFH-NEXT: flh fs0, %lo(gh)(a0)
+; RV32DZFH-NEXT: #APP
+; RV32DZFH-NEXT: .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32DZFH-NEXT: #NO_APP
+; RV32DZFH-NEXT: fmv.h fa0, ft0
+; RV32DZFH-NEXT: fld fs0, 8(sp) # 8-byte Folded Reload
+; RV32DZFH-NEXT: addi sp, sp, 16
+; RV32DZFH-NEXT: ret
+;
+; RV64DZFH-LABEL: constraint_f_half_abi_name:
+; RV64DZFH: # %bb.0:
+; RV64DZFH-NEXT: addi sp, sp, -16
+; RV64DZFH-NEXT: fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV64DZFH-NEXT: lui a0, %hi(gh)
+; RV64DZFH-NEXT: flh fs0, %lo(gh)(a0)
+; RV64DZFH-NEXT: #APP
+; RV64DZFH-NEXT: .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64DZFH-NEXT: #NO_APP
+; RV64DZFH-NEXT: fmv.h fa0, ft0
+; RV64DZFH-NEXT: fld fs0, 8(sp) # 8-byte Folded Reload
+; RV64DZFH-NEXT: addi sp, sp, 16
+; RV64DZFH-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa0},{fs0}"(half %a, half %1)
+ ret half %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
new file mode 100644
index 0000000..a8d3515
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi=ilp32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32FINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64FINX %s
+
+@gf = external global float
+
+define float @constraint_r_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_r_float:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a1, %hi(gf)
+; RV32FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.s a0, a0, a1
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_r_float:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gf)
+; RV64FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.s a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "=r,r,r"(float %a, float %1)
+ ret float %2
+}
+
+define float @constraint_cr_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_cr_float:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a1, %hi(gf)
+; RV32FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.s a0, a0, a1
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_cr_float:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gf)
+; RV64FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.s a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "=^cr,cr,cr"(float %a, float %1)
+ ret float %2
+}
+
+define float @constraint_float_abi_name(float %a) nounwind {
+; RV32FINX-LABEL: constraint_float_abi_name:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: addi sp, sp, -16
+; RV32FINX-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT: lui a1, %hi(gf)
+; RV32FINX-NEXT: lw s0, %lo(gf)(a1)
+; RV32FINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.s t0, a0, s0
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: mv a0, t0
+; RV32FINX-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT: addi sp, sp, 16
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_float_abi_name:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: addi sp, sp, -16
+; RV64FINX-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
+; RV64FINX-NEXT: lui a1, %hi(gf)
+; RV64FINX-NEXT: lw s0, %lo(gf)(a1)
+; RV64FINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.s t0, a0, s0
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: mv a0, t0
+; RV64FINX-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; RV64FINX-NEXT: addi sp, sp, 16
+; RV64FINX-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(float %a, float %1)
+ ret float %2
+}
+
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
new file mode 100644
index 0000000..f9707c6c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
@@ -0,0 +1,158 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32ZHINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64ZHINX %s
+; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32DINXZHINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64DINXZHINX %s
+
+@gh = external global half
+
+define half @constraint_r_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_r_half:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: lui a1, %hi(gh)
+; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT: #APP
+; RV32ZHINX-NEXT: fadd.h a0, a0, a1
+; RV32ZHINX-NEXT: #NO_APP
+; RV32ZHINX-NEXT: ret
+;
+; RV64ZHINX-LABEL: constraint_r_half:
+; RV64ZHINX: # %bb.0:
+; RV64ZHINX-NEXT: lui a1, %hi(gh)
+; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT: #APP
+; RV64ZHINX-NEXT: fadd.h a0, a0, a1
+; RV64ZHINX-NEXT: #NO_APP
+; RV64ZHINX-NEXT: ret
+;
+; RV32DINXZHINX-LABEL: constraint_r_half:
+; RV32DINXZHINX: # %bb.0:
+; RV32DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT: #APP
+; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT: #NO_APP
+; RV32DINXZHINX-NEXT: ret
+;
+; RV64DINXZHINX-LABEL: constraint_r_half:
+; RV64DINXZHINX: # %bb.0:
+; RV64DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT: #APP
+; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT: #NO_APP
+; RV64DINXZHINX-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.h $0, $1, $2", "=r,r,r"(half %a, half %1)
+ ret half %2
+}
+
+define half @constraint_cr_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_cr_half:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: lui a1, %hi(gh)
+; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT: #APP
+; RV32ZHINX-NEXT: fadd.h a0, a0, a1
+; RV32ZHINX-NEXT: #NO_APP
+; RV32ZHINX-NEXT: ret
+;
+; RV64ZHINX-LABEL: constraint_cr_half:
+; RV64ZHINX: # %bb.0:
+; RV64ZHINX-NEXT: lui a1, %hi(gh)
+; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT: #APP
+; RV64ZHINX-NEXT: fadd.h a0, a0, a1
+; RV64ZHINX-NEXT: #NO_APP
+; RV64ZHINX-NEXT: ret
+;
+; RV32DINXZHINX-LABEL: constraint_cr_half:
+; RV32DINXZHINX: # %bb.0:
+; RV32DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT: #APP
+; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT: #NO_APP
+; RV32DINXZHINX-NEXT: ret
+;
+; RV64DINXZHINX-LABEL: constraint_cr_half:
+; RV64DINXZHINX: # %bb.0:
+; RV64DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT: #APP
+; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT: #NO_APP
+; RV64DINXZHINX-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.h $0, $1, $2", "=^cr,^cr,^cr"(half %a, half %1)
+ ret half %2
+}
+
+define half @constraint_half_abi_name(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_half_abi_name:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: addi sp, sp, -16
+; RV32ZHINX-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZHINX-NEXT: lui a1, %hi(gh)
+; RV32ZHINX-NEXT: lh s0, %lo(gh)(a1)
+; RV32ZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
+; RV32ZHINX-NEXT: #APP
+; RV32ZHINX-NEXT: fadd.s t0, a0, s0
+; RV32ZHINX-NEXT: #NO_APP
+; RV32ZHINX-NEXT: mv a0, t0
+; RV32ZHINX-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZHINX-NEXT: addi sp, sp, 16
+; RV32ZHINX-NEXT: ret
+;
+; RV64ZHINX-LABEL: constraint_half_abi_name:
+; RV64ZHINX: # %bb.0:
+; RV64ZHINX-NEXT: addi sp, sp, -16
+; RV64ZHINX-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
+; RV64ZHINX-NEXT: lui a1, %hi(gh)
+; RV64ZHINX-NEXT: lh s0, %lo(gh)(a1)
+; RV64ZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
+; RV64ZHINX-NEXT: #APP
+; RV64ZHINX-NEXT: fadd.s t0, a0, s0
+; RV64ZHINX-NEXT: #NO_APP
+; RV64ZHINX-NEXT: mv a0, t0
+; RV64ZHINX-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; RV64ZHINX-NEXT: addi sp, sp, 16
+; RV64ZHINX-NEXT: ret
+;
+; RV32DINXZHINX-LABEL: constraint_half_abi_name:
+; RV32DINXZHINX: # %bb.0:
+; RV32DINXZHINX-NEXT: addi sp, sp, -16
+; RV32DINXZHINX-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; RV32DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT: lh s0, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
+; RV32DINXZHINX-NEXT: #APP
+; RV32DINXZHINX-NEXT: fadd.s t0, a0, s0
+; RV32DINXZHINX-NEXT: #NO_APP
+; RV32DINXZHINX-NEXT: mv a0, t0
+; RV32DINXZHINX-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32DINXZHINX-NEXT: addi sp, sp, 16
+; RV32DINXZHINX-NEXT: ret
+;
+; RV64DINXZHINX-LABEL: constraint_half_abi_name:
+; RV64DINXZHINX: # %bb.0:
+; RV64DINXZHINX-NEXT: addi sp, sp, -16
+; RV64DINXZHINX-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
+; RV64DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT: lh s0, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
+; RV64DINXZHINX-NEXT: #APP
+; RV64DINXZHINX-NEXT: fadd.s t0, a0, s0
+; RV64DINXZHINX-NEXT: #NO_APP
+; RV64DINXZHINX-NEXT: mv a0, t0
+; RV64DINXZHINX-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; RV64DINXZHINX-NEXT: addi sp, sp, 16
+; RV64DINXZHINX-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(half %a, half %1)
+ ret half %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm.ll b/llvm/test/CodeGen/RISCV/inline-asm.ll
index cb240d2..7926674 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm.ll
@@ -56,6 +56,29 @@ define i32 @constraint_r_zero(i32 %a) nounwind {
ret i32 %2
}
+define i32 @constraint_cr(i32 %a) nounwind {
+; RV32I-LABEL: constraint_cr:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, %hi(gi)
+; RV32I-NEXT: lw a1, %lo(gi)(a1)
+; RV32I-NEXT: #APP
+; RV32I-NEXT: c.add a0, a0, a1
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: constraint_cr:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, %hi(gi)
+; RV64I-NEXT: lw a1, %lo(gi)(a1)
+; RV64I-NEXT: #APP
+; RV64I-NEXT: c.add a0, a0, a1
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: ret
+ %1 = load i32, ptr @gi
+ %2 = tail call i32 asm "c.add $0, $1, $2", "=^cr,0,^cr"(i32 %a, i32 %1)
+ ret i32 %2
+}
+
define i32 @constraint_i(i32 %a) nounwind {
; RV32I-LABEL: constraint_i:
; RV32I: # %bb.0:
@@ -215,6 +238,49 @@ define i32 @modifier_i_reg(i32 %a, i32 %b) nounwind {
ret i32 %1
}
+;; `.insn 0x4, 0x33 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is the
+;; raw encoding of `add`
+
+define i32 @modifier_N_reg(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: modifier_N_reg:
+; RV32I: # %bb.0:
+; RV32I-NEXT: #APP
+; RV32I-NEXT: .insn 0x4, 0x33 | (10 << 7) | (10 << 15) | (11 << 20)
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: modifier_N_reg:
+; RV64I: # %bb.0:
+; RV64I-NEXT: #APP
+; RV64I-NEXT: .insn 0x4, 0x33 | (10 << 7) | (10 << 15) | (11 << 20)
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: ret
+ %1 = tail call i32 asm ".insn 0x4, 0x33 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=r,r,r"(i32 %a, i32 %b)
+ ret i32 %1
+}
+
+;; `.insn 0x2, 0x9422 | (${0:N} << 7) | (${2:N} << 2)` is the raw encoding of
+;; `c.add` (note the constraint that the first input should be the same as the
+;; output).
+
+define i32 @modifier_N_with_cr_reg(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: modifier_N_with_cr_reg:
+; RV32I: # %bb.0:
+; RV32I-NEXT: #APP
+; RV32I-NEXT: .insn 0x2, 0x9422 | (10 << 7) | (11 << 2)
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: modifier_N_with_cr_reg:
+; RV64I: # %bb.0:
+; RV64I-NEXT: #APP
+; RV64I-NEXT: .insn 0x2, 0x9422 | (10 << 7) | (11 << 2)
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: ret
+ %1 = tail call i32 asm ".insn 0x2, 0x9422 | (${0:N} << 7) | (${2:N} << 2)", "=^cr,0,^cr"(i32 %a, i32 %b)
+ ret i32 %1
+}
+
define void @operand_global() nounwind {
; RV32I-LABEL: operand_global:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/RISCV/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..de87b10
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple riscv32 | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple riscv64 | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: call main
+;
+; CHECK-64-LABEL: naked:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: call main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: addi sp, sp, -16
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK-32-NEXT: .cfi_offset ra, -4
+; CHECK-32-NEXT: .cfi_offset s0, -8
+; CHECK-32-NEXT: addi s0, sp, 16
+; CHECK-32-NEXT: .cfi_def_cfa s0, 0
+; CHECK-32-NEXT: call main
+;
+; CHECK-64-LABEL: normal:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: addi sp, sp, -16
+; CHECK-64-NEXT: .cfi_def_cfa_offset 16
+; CHECK-64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-64-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; CHECK-64-NEXT: .cfi_offset ra, -8
+; CHECK-64-NEXT: .cfi_offset s0, -16
+; CHECK-64-NEXT: addi s0, sp, 16
+; CHECK-64-NEXT: .cfi_def_cfa s0, 0
+; CHECK-64-NEXT: call main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
new file mode 100644
index 0000000..ba18406
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64 %s
+; RUN: llc -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64-LINUX %s
+
+declare void @llvm.init.trampoline(ptr, ptr, ptr)
+declare ptr @llvm.adjust.trampoline(ptr)
+declare i64 @f(ptr nest, i64)
+
+define i64 @test0(i64 %n, ptr %p) nounwind {
+; RV64-LABEL: test0:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: lui a0, %hi(f)
+; RV64-NEXT: addi a0, a0, %lo(f)
+; RV64-NEXT: sd a0, 32(sp)
+; RV64-NEXT: li a0, 919
+; RV64-NEXT: lui a2, %hi(.LCPI0_0)
+; RV64-NEXT: ld a2, %lo(.LCPI0_0)(a2)
+; RV64-NEXT: lui a3, 6203
+; RV64-NEXT: addi a3, a3, 643
+; RV64-NEXT: sw a0, 8(sp)
+; RV64-NEXT: sw a3, 12(sp)
+; RV64-NEXT: sd a2, 16(sp)
+; RV64-NEXT: sd a1, 24(sp)
+; RV64-NEXT: addi a1, sp, 24
+; RV64-NEXT: addi a0, sp, 8
+; RV64-NEXT: addi s1, sp, 8
+; RV64-NEXT: call __clear_cache
+; RV64-NEXT: mv a0, s0
+; RV64-NEXT: jalr s1
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+;
+; RV64-LINUX-LABEL: test0:
+; RV64-LINUX: # %bb.0:
+; RV64-LINUX-NEXT: addi sp, sp, -64
+; RV64-LINUX-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-LINUX-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-LINUX-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-LINUX-NEXT: mv s0, a0
+; RV64-LINUX-NEXT: lui a0, %hi(f)
+; RV64-LINUX-NEXT: addi a0, a0, %lo(f)
+; RV64-LINUX-NEXT: sd a0, 32(sp)
+; RV64-LINUX-NEXT: li a0, 919
+; RV64-LINUX-NEXT: lui a2, %hi(.LCPI0_0)
+; RV64-LINUX-NEXT: ld a2, %lo(.LCPI0_0)(a2)
+; RV64-LINUX-NEXT: lui a3, 6203
+; RV64-LINUX-NEXT: addi a3, a3, 643
+; RV64-LINUX-NEXT: sw a0, 8(sp)
+; RV64-LINUX-NEXT: sw a3, 12(sp)
+; RV64-LINUX-NEXT: sd a2, 16(sp)
+; RV64-LINUX-NEXT: sd a1, 24(sp)
+; RV64-LINUX-NEXT: addi a1, sp, 24
+; RV64-LINUX-NEXT: addi a0, sp, 8
+; RV64-LINUX-NEXT: addi s1, sp, 8
+; RV64-LINUX-NEXT: li a2, 0
+; RV64-LINUX-NEXT: call __riscv_flush_icache
+; RV64-LINUX-NEXT: mv a0, s0
+; RV64-LINUX-NEXT: jalr s1
+; RV64-LINUX-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-LINUX-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-LINUX-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-LINUX-NEXT: addi sp, sp, 64
+; RV64-LINUX-NEXT: ret
+ %alloca = alloca [32 x i8], align 8
+ call void @llvm.init.trampoline(ptr %alloca, ptr @f, ptr %p)
+ %tramp = call ptr @llvm.adjust.trampoline(ptr %alloca)
+ %ret = call i64 %tramp(i64 %n)
+ ret i64 %ret
+
+}
diff --git a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
index 95695aa..18bd41a 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
@@ -1,6 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN: -target-abi=ilp32 -mattr=+zhinx | FileCheck %s
+
+;; These tests cover the use of `r` and `cr` constraints for floating point values on rv32.
+;;
+;; In particular, there is significant complexity around using paired GPRs for double values on rv32.
+
define dso_local void @zdinx_asm(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
; CHECK-LABEL: zdinx_asm:
; CHECK: # %bb.0: # %entry
@@ -50,3 +55,59 @@ entry:
store half %0, ptr %arrayidx, align 8
ret void
}
+
+define dso_local void @zdinx_asm_cr(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
+; CHECK-LABEL: zdinx_asm_cr:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: mv s1, a2
+; CHECK-NEXT: mv a4, a3
+; CHECK-NEXT: mv s0, a1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: fsgnjx.d a2, s0, a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: sw a2, 8(a0)
+; CHECK-NEXT: sw a3, 12(a0)
+; CHECK-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds double, ptr %a, i32 1
+ %0 = tail call double asm "fsgnjx.d $0, $1, $2", "=^cr,^cr,^cr"(double %b, double %c)
+ store double %0, ptr %arrayidx, align 8
+ ret void
+}
+
+define dso_local void @zfinx_asm_cr(ptr nocapture noundef writeonly %a, float noundef %b, float noundef %c) nounwind {
+; CHECK-LABEL: zfinx_asm_cr:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: fsgnjx.s a1, a1, a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: sw a1, 4(a0)
+; CHECK-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds float, ptr %a, i32 1
+ %0 = tail call float asm "fsgnjx.s $0, $1, $2", "=^cr,^cr,^cr"(float %b, float %c)
+ store float %0, ptr %arrayidx, align 8
+ ret void
+}
+
+define dso_local void @zhinx_asm_cr(ptr nocapture noundef writeonly %a, half noundef %b, half noundef %c) nounwind {
+; CHECK-LABEL: zhinx_asm_cr:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: fsgnjx.h a1, a1, a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: sh a1, 2(a0)
+; CHECK-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds half, ptr %a, i32 1
+ %0 = tail call half asm "fsgnjx.h $0, $1, $2", "=^cr,^cr,^cr"(half %b, half %c)
+ store half %0, ptr %arrayidx, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPARC/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/SPARC/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..af97c573
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple sparc | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple sparc64 | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked:
+; CHECK-32: .cfi_startproc
+; CHECK-32-NEXT: ! %bb.0:
+; CHECK-32-NEXT: call main
+; CHECK-32-NEXT: nop
+;
+; CHECK-64-LABEL: naked:
+; CHECK-64: .cfi_startproc
+; CHECK-64-NEXT: ! %bb.0:
+; CHECK-64-NEXT: call main
+; CHECK-64-NEXT: nop
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal:
+; CHECK-32: .cfi_startproc
+; CHECK-32-NEXT: ! %bb.0:
+; CHECK-32-NEXT: save %sp, -96, %sp
+; CHECK-32-NEXT: .cfi_def_cfa_register %fp
+; CHECK-32-NEXT: .cfi_window_save
+; CHECK-32-NEXT: .cfi_register %o7, %i7
+; CHECK-32-NEXT: call main
+; CHECK-32-NEXT: nop
+;
+; CHECK-64-LABEL: normal:
+; CHECK-64: .cfi_startproc
+; CHECK-64-NEXT: ! %bb.0:
+; CHECK-64-NEXT: save %sp, -176, %sp
+; CHECK-64-NEXT: .cfi_def_cfa_register %fp
+; CHECK-64-NEXT: .cfi_window_save
+; CHECK-64-NEXT: .cfi_register %o7, %i7
+; CHECK-64-NEXT: call main
+; CHECK-64-NEXT: nop
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/SystemZ/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/SystemZ/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..3eb396e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple s390x | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: brasl %r14, main@PLT
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r11, %r15, 88(%r15)
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: aghi %r15, -160
+; CHECK-NEXT: .cfi_def_cfa_offset 320
+; CHECK-NEXT: lgr %r11, %r15
+; CHECK-NEXT: .cfi_def_cfa_register %r11
+; CHECK-NEXT: brasl %r14, main@PLT
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-reduce-add-01.ll b/llvm/test/CodeGen/SystemZ/vec-reduce-add-01.ll
index 56b151d..2762b8d 100644
--- a/llvm/test/CodeGen/SystemZ/vec-reduce-add-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-reduce-add-01.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; Test vector add reduction instrinsic
+; Test vector add reduction intrinsic
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 | FileCheck %s
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-frame-chain.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-frame-chain.ll
deleted file mode 100644
index 8bcf871..0000000
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-frame-chain.ll
+++ /dev/null
@@ -1,150 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=thumbv8.1m.main-none-eabi < %s --force-dwarf-frame-section -frame-pointer=all -mattr=+aapcs-frame-chain | FileCheck %s
-
-; int test1() {
-; return 0;
-; }
-define i32 @test1() "sign-return-address"="non-leaf" {
-; CHECK-LABEL: test1:
-; CHECK: .cfi_sections .debug_frame
-; CHECK-NEXT: .cfi_startproc
-; CHECK-NEXT: @ %bb.0: @ %entry
-; CHECK-NEXT: pac r12, lr, sp
-; CHECK-NEXT: .save {ra_auth_code}
-; CHECK-NEXT: str r12, [sp, #-4]!
-; CHECK-NEXT: .cfi_def_cfa_offset 4
-; CHECK-NEXT: .cfi_offset ra_auth_code, -4
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push.w {r11, lr}
-; CHECK-NEXT: .cfi_def_cfa_offset 12
-; CHECK-NEXT: .cfi_offset lr, -8
-; CHECK-NEXT: .cfi_offset r11, -12
-; CHECK-NEXT: .setfp r11, sp
-; CHECK-NEXT: mov r11, sp
-; CHECK-NEXT: .cfi_def_cfa_register r11
-; CHECK-NEXT: movs r0, #0
-; CHECK-NEXT: pop.w {r11, lr}
-; CHECK-NEXT: ldr r12, [sp], #4
-; CHECK-NEXT: aut r12, lr, sp
-; CHECK-NEXT: bx lr
-entry:
- ret i32 0
-}
-
-; void foo(int n) {
-; int a[n];
-; bar(a);
-; }
-define dso_local void @test2(i32 noundef %n) "sign-return-address"="non-leaf" {
-; CHECK-LABEL: test2:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: @ %bb.0: @ %entry
-; CHECK-NEXT: pac r12, lr, sp
-; CHECK-NEXT: .save {r4, r7, ra_auth_code}
-; CHECK-NEXT: push.w {r4, r7, r12}
-; CHECK-NEXT: .cfi_def_cfa_offset 12
-; CHECK-NEXT: .cfi_offset ra_auth_code, -4
-; CHECK-NEXT: .cfi_offset r7, -8
-; CHECK-NEXT: .cfi_offset r4, -12
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push.w {r11, lr}
-; CHECK-NEXT: .cfi_def_cfa_offset 20
-; CHECK-NEXT: .cfi_offset lr, -16
-; CHECK-NEXT: .cfi_offset r11, -20
-; CHECK-NEXT: .setfp r11, sp
-; CHECK-NEXT: mov r11, sp
-; CHECK-NEXT: .cfi_def_cfa_register r11
-; CHECK-NEXT: .pad #4
-; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: movs r1, #7
-; CHECK-NEXT: add.w r0, r1, r0, lsl #2
-; CHECK-NEXT: bic r0, r0, #7
-; CHECK-NEXT: sub.w r0, sp, r0
-; CHECK-NEXT: mov sp, r0
-; CHECK-NEXT: bl take_ptr
-; CHECK-NEXT: mov sp, r11
-; CHECK-NEXT: pop.w {r11, lr}
-; CHECK-NEXT: pop.w {r4, r7, r12}
-; CHECK-NEXT: aut r12, lr, sp
-; CHECK-NEXT: bx lr
-entry:
- %vla = alloca i32, i32 %n, align 4
- call void @take_ptr(ptr noundef nonnull %vla)
- ret void
-}
-
-; void test3(int c, float e, int z) {
-; if (c)
-; knr();
-; take_ptr(alloca(z));
-; if (e)
-; knr();
-; }
-define void @test3(i32 noundef %c, float noundef %e, i32 noundef %z) "sign-return-address"="non-leaf" {
-; CHECK-LABEL: test3:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: @ %bb.0: @ %entry
-; CHECK-NEXT: pac r12, lr, sp
-; CHECK-NEXT: .save {r4, r5, r6, r7, ra_auth_code}
-; CHECK-NEXT: push.w {r4, r5, r6, r7, r12}
-; CHECK-NEXT: .cfi_def_cfa_offset 20
-; CHECK-NEXT: .cfi_offset ra_auth_code, -4
-; CHECK-NEXT: .cfi_offset r7, -8
-; CHECK-NEXT: .cfi_offset r6, -12
-; CHECK-NEXT: .cfi_offset r5, -16
-; CHECK-NEXT: .cfi_offset r4, -20
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push.w {r11, lr}
-; CHECK-NEXT: .cfi_def_cfa_offset 28
-; CHECK-NEXT: .cfi_offset lr, -24
-; CHECK-NEXT: .cfi_offset r11, -28
-; CHECK-NEXT: .setfp r11, sp
-; CHECK-NEXT: mov r11, sp
-; CHECK-NEXT: .cfi_def_cfa_register r11
-; CHECK-NEXT: .pad #4
-; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: cmp r0, #0
-; CHECK-NEXT: mov r5, r2
-; CHECK-NEXT: mov r4, r1
-; CHECK-NEXT: it ne
-; CHECK-NEXT: blne knr
-; CHECK-NEXT: adds r0, r5, #7
-; CHECK-NEXT: bic r0, r0, #7
-; CHECK-NEXT: sub.w r0, sp, r0
-; CHECK-NEXT: mov sp, r0
-; CHECK-NEXT: bl take_ptr
-; CHECK-NEXT: mov r0, r4
-; CHECK-NEXT: movs r1, #0
-; CHECK-NEXT: bl __aeabi_fcmpeq
-; CHECK-NEXT: cmp r0, #0
-; CHECK-NEXT: it eq
-; CHECK-NEXT: bleq knr
-; CHECK-NEXT: mov sp, r11
-; CHECK-NEXT: pop.w {r11, lr}
-; CHECK-NEXT: pop.w {r4, r5, r6, r7, r12}
-; CHECK-NEXT: aut r12, lr, sp
-; CHECK-NEXT: bx lr
-entry:
- %tobool.not = icmp eq i32 %c, 0
- br i1 %tobool.not, label %if.end, label %if.then
-
-if.then: ; preds = %entry
- tail call void @knr()
- br label %if.end
-
-if.end: ; preds = %if.then, %entry
- %0 = alloca i8, i32 %z, align 8
- call void @take_ptr(ptr noundef nonnull %0)
- %tobool1 = fcmp une float %e, 0.000000e+00
- br i1 %tobool1, label %if.then2, label %if.end3
-
-if.then2: ; preds = %if.end
- call void @knr()
- br label %if.end3
-
-if.end3: ; preds = %if.then2, %if.end
- ret void
-}
-
-declare void @knr(...)
-declare void @take_ptr(ptr noundef)
diff --git a/llvm/test/CodeGen/VE/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/VE/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..3b88bea
--- /dev/null
+++ b/llvm/test/CodeGen/VE/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple ve | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, main@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s12, main@hi(, %s0)
+; CHECK-NEXT: bsic %s10, (, %s12)
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: or %s9, 0, %s11
+; CHECK-NEXT: lea %s11, -240(, %s11)
+; CHECK-NEXT: brge.l.t %s11, %s8, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: ld %s61, 24(, %s14)
+; CHECK-NEXT: or %s62, 0, %s0
+; CHECK-NEXT: lea %s63, 315
+; CHECK-NEXT: shm.l %s63, (%s61)
+; CHECK-NEXT: shm.l %s8, 8(%s61)
+; CHECK-NEXT: shm.l %s11, 16(%s61)
+; CHECK-NEXT: monc
+; CHECK-NEXT: or %s0, 0, %s62
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: lea %s0, main@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s12, main@hi(, %s0)
+; CHECK-NEXT: bsic %s10, (, %s12)
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/WebAssembly/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/WebAssembly/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..fcd42e8
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple wasm32 | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple wasm64 | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked:
+; CHECK-32: .functype naked () -> ()
+; CHECK-32-NEXT: # %bb.0:
+; CHECK-32-NEXT: call main
+; CHECK-32-NEXT: unreachable
+;
+; CHECK-64-LABEL: naked:
+; CHECK-64: .functype naked () -> ()
+; CHECK-64-NEXT: # %bb.0:
+; CHECK-64-NEXT: call main
+; CHECK-64-NEXT: unreachable
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal:
+; CHECK-32: .functype normal () -> ()
+; CHECK-32-NEXT: # %bb.0:
+; CHECK-32-NEXT: call main
+; CHECK-32-NEXT: unreachable
+;
+; CHECK-64-LABEL: normal:
+; CHECK-64: .functype normal () -> ()
+; CHECK-64-NEXT: # %bb.0:
+; CHECK-64-NEXT: call main
+; CHECK-64-NEXT: unreachable
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/X86/andnot-patterns.ll b/llvm/test/CodeGen/X86/andnot-patterns.ll
index 46ebe6b..101e4ed 100644
--- a/llvm/test/CodeGen/X86/andnot-patterns.ll
+++ b/llvm/test/CodeGen/X86/andnot-patterns.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=+bmi | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+bmi | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=i686-- -mattr=-bmi | FileCheck %s --check-prefixes=X86,X86-NOBMI
+; RUN: llc < %s -mtriple=i686-- -mattr=+bmi | FileCheck %s --check-prefixes=X86,X86-BMI
+; RUN: llc < %s -mtriple=x86_64-- -mattr=-bmi | FileCheck %s --check-prefixes=X64,X64-NOBMI
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+bmi | FileCheck %s --check-prefixes=X64,X64-BMI
; TODO - PR112425 - attempt to reconstruct andnot patterns through bitwise-agnostic operations
@@ -624,3 +626,8 @@ define i8 @andnot_bitreverse_i8(i8 %a0, i8 %a1) nounwind {
%and = and i8 %bitrev, %a0
ret i8 %and
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64-BMI: {{.*}}
+; X64-NOBMI: {{.*}}
+; X86-BMI: {{.*}}
+; X86-NOBMI: {{.*}}
diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
index 90733df..44ab33a 100644
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -122,7 +122,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; CHECK-LABEL: mul_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; CHECK-NEXT: vpand %ymm1, %ymm2, %ymm3
+; CHECK-NEXT: vpand %ymm2, %ymm1, %ymm3
; CHECK-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3
; CHECK-NEXT: vpand %ymm2, %ymm3, %ymm3
; CHECK-NEXT: vpandn %ymm1, %ymm2, %ymm1
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index 7eee418..c982884 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -725,12 +725,11 @@ define <4 x i64> @combine_vec4i64_ashr_clamped(<4 x i64> %x, <4 x i64> %y) {
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm6
+; SSE41-NEXT: pxor %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259519,9223372039002259519]
-; SSE41-NEXT: movdqa %xmm8, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483711,2147483711,2147483711,2147483711]
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
diff --git a/llvm/test/CodeGen/X86/fma.ll b/llvm/test/CodeGen/X86/fma.ll
index c55f50e..f26960b 100644
--- a/llvm/test/CodeGen/X86/fma.ll
+++ b/llvm/test/CodeGen/X86/fma.ll
@@ -2096,6 +2096,142 @@ entry:
ret <8 x double> %call
}
+define float @constant_fold_f32() {
+; FMA32-LABEL: constant_fold_f32:
+; FMA32: ## %bb.0:
+; FMA32-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}} ## encoding: [0xd9,0x05,A,A,A,A]
+; FMA32-NEXT: ## fixup A - offset: 2, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: constant_fold_f32:
+; FMACALL32: ## %bb.0:
+; FMACALL32-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}} ## encoding: [0xd9,0x05,A,A,A,A]
+; FMACALL32-NEXT: ## fixup A - offset: 2, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMACALL32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: constant_fold_f32:
+; FMA64: ## %bb.0:
+; FMA64-NEXT: vmovss {{.*#+}} xmm0 = [1.02E+3,0.0E+0,0.0E+0,0.0E+0]
+; FMA64-NEXT: ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; FMA64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: constant_fold_f32:
+; FMACALL64: ## %bb.0:
+; FMACALL64-NEXT: movss {{.*#+}} xmm0 = [1.02E+3,0.0E+0,0.0E+0,0.0E+0]
+; FMACALL64-NEXT: ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
+; FMACALL64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMACALL64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: constant_fold_f32:
+; AVX512: ## %bb.0:
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = [1.02E+3,0.0E+0,0.0E+0,0.0E+0]
+; AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: constant_fold_f32:
+; AVX512VL: ## %bb.0:
+; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = [1.02E+3,0.0E+0,0.0E+0,0.0E+0]
+; AVX512VL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+ %r = call float @llvm.fma.f32(float 5.000000e+01, float 2.000000e+01, float 2.000000e+01)
+ ret float %r
+}
+
+define <4 x float> @constant_fold_v4f32() {
+; FMA32-LABEL: constant_fold_v4f32:
+; FMA32: ## %bb.0:
+; FMA32-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; FMA32-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMA32-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: constant_fold_v4f32:
+; FMACALL32: ## %bb.0:
+; FMACALL32-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; FMACALL32-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMACALL32-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMACALL32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: constant_fold_v4f32:
+; FMA64: ## %bb.0:
+; FMA64-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; FMA64-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMA64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: constant_fold_v4f32:
+; FMACALL64: ## %bb.0:
+; FMACALL64-NEXT: movaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; FMACALL64-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; FMACALL64-NEXT: ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMACALL64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: constant_fold_v4f32:
+; AVX512: ## %bb.0:
+; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: constant_fold_v4f32:
+; AVX512VL: ## %bb.0:
+; AVX512VL-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,4.9E+2,1.18E+3,2.07E+3]
+; AVX512VL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+ %r = call <4 x float> @llvm.fma.v4f32(<4 x float> <float 0.000000e+01, float 1.000000e+01, float 2.000000e+01, float 3.000000e+01>, <4 x float> <float 4.000000e+01, float 5.000000e+01, float 6.000000e+01, float 7.000000e+01>, <4 x float> <float 0.000000e+01, float -1.000000e+01, float -2.000000e+01, float -3.000000e+01>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @constant_fold_v2f64() {
+; FMA32-LABEL: constant_fold_v2f64:
+; FMA32: ## %bb.0:
+; FMA32-NEXT: vmovaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; FMA32-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMA32-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: constant_fold_v2f64:
+; FMACALL32: ## %bb.0:
+; FMACALL32-NEXT: vmovaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; FMACALL32-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMACALL32-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; FMACALL32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: constant_fold_v2f64:
+; FMA64: ## %bb.0:
+; FMA64-NEXT: vmovaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; FMA64-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; FMA64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: constant_fold_v2f64:
+; FMACALL64: ## %bb.0:
+; FMACALL64-NEXT: movaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; FMACALL64-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
+; FMACALL64-NEXT: ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; FMACALL64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: constant_fold_v2f64:
+; AVX512: ## %bb.0:
+; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: constant_fold_v2f64:
+; AVX512VL: ## %bb.0:
+; AVX512VL-NEXT: vmovaps {{.*#+}} xmm0 = [4.1E+2,1.4E+3]
+; AVX512VL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+ %r = call <2 x double> @llvm.fma.v2f64(<2 x double> <double 1.000000e+01, double 2.000000e+01>, <2 x double> <double 4.000000e+01, double 7.000000e+01>, <2 x double> <double 1.000000e+01, double 0.000000e+01>)
+ ret <2 x double> %r
+}
+
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index 6fd3db3..ee83a79 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -2369,8 +2369,8 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm3, %xmm5
@@ -2391,7 +2391,7 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
@@ -2432,7 +2432,7 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOP-FALLBACK-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -2450,7 +2450,7 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOPAVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -2592,8 +2592,8 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmaddubsw %xmm3, %xmm5
; SSE41-NEXT: pand %xmm2, %xmm5
@@ -2616,7 +2616,7 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2
@@ -2659,7 +2659,7 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOP-FALLBACK-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -2677,7 +2677,7 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOPAVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -2823,8 +2823,8 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
; SSE41-NEXT: psrlw $1, %xmm0
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm3, %xmm5
@@ -2846,7 +2846,7 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
@@ -2889,7 +2889,7 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4
-; XOP-FALLBACK-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2],xmm4[2],xmm0[4],xmm4[4],xmm0[6],xmm4[6],xmm0[8],xmm4[8],xmm0[10],xmm4[10],xmm0[12],xmm4[12],xmm0[14],xmm4[14]
; XOP-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
@@ -2908,7 +2908,7 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4
-; XOPAVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2],xmm4[2],xmm0[4],xmm4[4],xmm0[6],xmm4[6],xmm0[8],xmm4[8],xmm0[10],xmm4[10],xmm0[12],xmm4[12],xmm0[14],xmm4[14]
; XOPAVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
@@ -3054,8 +3054,8 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm3, %xmm5
@@ -3077,7 +3077,7 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
@@ -3120,7 +3120,7 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind
; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOP-FALLBACK-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -3139,7 +3139,7 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOPAVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -3287,8 +3287,8 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; SSE41-NEXT: psrlw $1, %xmm0
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm3, %xmm5
@@ -3311,7 +3311,7 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
@@ -3356,7 +3356,7 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOP-FALLBACK-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
@@ -3376,7 +3376,7 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4
; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4
-; XOPAVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index 5a1c4c8..b4e8f0a 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -1914,7 +1914,7 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm7, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8
; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5
@@ -1922,7 +1922,7 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
@@ -1944,7 +1944,7 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -1974,14 +1974,14 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
-; XOP-NEXT: vpand %xmm5, %xmm7, %xmm5
+; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6
-; XOP-NEXT: vpand %xmm4, %xmm7, %xmm4
+; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -1999,7 +1999,7 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2088,7 +2088,7 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm4, %xmm6, %xmm6
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm8
; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm6, %xmm7, %xmm6
@@ -2096,7 +2096,7 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
@@ -2120,7 +2120,7 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
+; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4
; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpand %ymm2, %ymm4, %ymm4
; AVX2-NEXT: vpandn %ymm3, %ymm2, %ymm2
@@ -2150,14 +2150,14 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
-; XOP-NEXT: vpand %xmm5, %xmm7, %xmm5
+; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6
-; XOP-NEXT: vpand %xmm4, %xmm7, %xmm4
+; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -2176,7 +2176,7 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm2, %ymm4, %ymm4
; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2
@@ -2266,7 +2266,7 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm7, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8
; AVX1-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5
@@ -2274,7 +2274,7 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm8, %xmm0
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
@@ -2297,7 +2297,7 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX2-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm4
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2328,14 +2328,14 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
; XOP-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8
-; XOP-NEXT: vpand %xmm5, %xmm7, %xmm5
+; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
; XOP-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6
-; XOP-NEXT: vpand %xmm4, %xmm7, %xmm4
+; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1
; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1
@@ -2354,7 +2354,7 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm4
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2444,7 +2444,7 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm7, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8
; AVX1-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5
@@ -2452,7 +2452,7 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm8, %xmm2
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
@@ -2475,7 +2475,7 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2506,14 +2506,14 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
-; XOP-NEXT: vpand %xmm5, %xmm7, %xmm5
+; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6
-; XOP-NEXT: vpand %xmm4, %xmm7, %xmm4
+; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -2532,7 +2532,7 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2623,7 +2623,7 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm7, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8
; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5
@@ -2631,7 +2631,7 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
@@ -2655,7 +2655,7 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
@@ -2687,14 +2687,14 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
; XOP-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8
-; XOP-NEXT: vpand %xmm5, %xmm7, %xmm5
+; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
; XOP-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6
-; XOP-NEXT: vpand %xmm4, %xmm7, %xmm4
+; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1
; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1
@@ -2714,7 +2714,7 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
index 8289e88..9b08d8b 100644
--- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll
+++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
@@ -892,13 +892,13 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-SKX-VBMI-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-SKX-VBMI-NEXT: vpandn %ymm3, %ymm4, %ymm5
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5
-; CHECK-SKX-VBMI-NEXT: vpand %ymm3, %ymm4, %ymm3
+; CHECK-SKX-VBMI-NEXT: vpand %ymm4, %ymm3, %ymm3
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1
; CHECK-SKX-VBMI-NEXT: vmovdqa {{.*#+}} ymm3 = [0,32,2,34,4,36,6,38,8,40,10,42,12,44,14,46,16,48,18,50,20,52,22,54,24,56,26,58,28,60,30,62]
; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm1
; CHECK-SKX-VBMI-NEXT: vpandn %ymm2, %ymm4, %ymm5
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5
-; CHECK-SKX-VBMI-NEXT: vpand %ymm2, %ymm4, %ymm2
+; CHECK-SKX-VBMI-NEXT: vpand %ymm4, %ymm2, %ymm2
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0
; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm0
; CHECK-SKX-VBMI-NEXT: vmovdqa %ymm0, (%rdx)
@@ -913,13 +913,13 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-AVX512-NEXT: vmovdqa (%rsi), %ymm2
; CHECK-AVX512-NEXT: vmovdqa 32(%rsi), %ymm3
; CHECK-AVX512-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; CHECK-AVX512-NEXT: vpand %ymm3, %ymm4, %ymm5
+; CHECK-AVX512-NEXT: vpand %ymm4, %ymm3, %ymm5
; CHECK-AVX512-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5
; CHECK-AVX512-NEXT: vpandn %ymm3, %ymm4, %ymm3
; CHECK-AVX512-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1
; CHECK-AVX512-NEXT: vpsllw $8, %ymm1, %ymm1
; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm5 & ymm4)
-; CHECK-AVX512-NEXT: vpand %ymm2, %ymm4, %ymm3
+; CHECK-AVX512-NEXT: vpand %ymm4, %ymm2, %ymm3
; CHECK-AVX512-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3
; CHECK-AVX512-NEXT: vpandn %ymm2, %ymm4, %ymm2
; CHECK-AVX512-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0
@@ -939,13 +939,13 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-VBMI-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-VBMI-NEXT: vpandn %ymm3, %ymm4, %ymm5
; CHECK-VBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5
-; CHECK-VBMI-NEXT: vpand %ymm3, %ymm4, %ymm3
+; CHECK-VBMI-NEXT: vpand %ymm4, %ymm3, %ymm3
; CHECK-VBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1
; CHECK-VBMI-NEXT: vmovdqa {{.*#+}} ymm3 = [0,32,2,34,4,36,6,38,8,40,10,42,12,44,14,46,16,48,18,50,20,52,22,54,24,56,26,58,28,60,30,62]
; CHECK-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm1
; CHECK-VBMI-NEXT: vpandn %ymm2, %ymm4, %ymm5
; CHECK-VBMI-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5
-; CHECK-VBMI-NEXT: vpand %ymm2, %ymm4, %ymm2
+; CHECK-VBMI-NEXT: vpand %ymm4, %ymm2, %ymm2
; CHECK-VBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0
; CHECK-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm0
; CHECK-VBMI-NEXT: vmovdqa %ymm0, (%rdx)
@@ -967,7 +967,7 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-SKX-VBMI-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-SKX-VBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm3
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3
-; CHECK-SKX-VBMI-NEXT: vpandq %zmm1, %zmm2, %zmm1
+; CHECK-SKX-VBMI-NEXT: vpandq %zmm2, %zmm1, %zmm1
; CHECK-SKX-VBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
; CHECK-SKX-VBMI-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,64,2,66,4,68,6,70,8,72,10,74,12,76,14,78,16,80,18,82,20,84,22,86,24,88,26,90,28,92,30,94,32,96,34,98,36,100,38,102,40,104,42,106,44,108,46,110,48,112,50,114,52,116,54,118,56,120,58,122,60,124,62,126]
; CHECK-SKX-VBMI-NEXT: vpermi2b %zmm3, %zmm0, %zmm1
@@ -980,7 +980,7 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; CHECK-AVX512-NEXT: vmovdqa64 (%rsi), %zmm1
; CHECK-AVX512-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; CHECK-AVX512-NEXT: vpandq %zmm1, %zmm2, %zmm3
+; CHECK-AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm3
; CHECK-AVX512-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3
; CHECK-AVX512-NEXT: vpandnq %zmm1, %zmm2, %zmm1
; CHECK-AVX512-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
@@ -997,7 +997,7 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
; CHECK-VBMI-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-VBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm3
; CHECK-VBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3
-; CHECK-VBMI-NEXT: vpandq %zmm1, %zmm2, %zmm1
+; CHECK-VBMI-NEXT: vpandq %zmm2, %zmm1, %zmm1
; CHECK-VBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
; CHECK-VBMI-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,64,2,66,4,68,6,70,8,72,10,74,12,76,14,78,16,80,18,82,20,84,22,86,24,88,26,90,28,92,30,94,32,96,34,98,36,100,38,102,40,104,42,106,44,108,46,110,48,112,50,114,52,116,54,118,56,120,58,122,60,124,62,126]
; CHECK-VBMI-NEXT: vpermi2b %zmm3, %zmm0, %zmm1
diff --git a/llvm/test/CodeGen/X86/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/X86/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..3775600
--- /dev/null
+++ b/llvm/test/CodeGen/X86/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple i386 | FileCheck %s -check-prefixes=CHECK-32
+; RUN: llc < %s -mtriple x86_64 | FileCheck %s -check-prefixes=CHECK-64
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-32-LABEL: naked:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: calll main
+;
+; CHECK-64-LABEL: naked:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: callq main
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-32-LABEL: normal:
+; CHECK-32: # %bb.0:
+; CHECK-32-NEXT: pushl %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: .cfi_offset %ebp, -8
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: .cfi_def_cfa_register %ebp
+; CHECK-32-NEXT: calll main
+;
+; CHECK-64-LABEL: normal:
+; CHECK-64: # %bb.0:
+; CHECK-64-NEXT: pushq %rbp
+; CHECK-64-NEXT: .cfi_def_cfa_offset 16
+; CHECK-64-NEXT: .cfi_offset %rbp, -16
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-64-NEXT: callq main
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 6c3d048..fe8a4fa 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -161,8 +161,8 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE41-LABEL: mul_v16i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pmaddubsw %xmm3, %xmm4
; SSE41-NEXT: pand %xmm2, %xmm4
@@ -586,17 +586,16 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pand %xmm2, %xmm5
+; SSE41-NEXT: pandn %xmm2, %xmm5
+; SSE41-NEXT: pand %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm6
-; SSE41-NEXT: pmaddubsw %xmm5, %xmm6
+; SSE41-NEXT: pmaddubsw %xmm2, %xmm6
; SSE41-NEXT: pand %xmm4, %xmm6
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pandn %xmm2, %xmm5
; SSE41-NEXT: pmaddubsw %xmm5, %xmm0
; SSE41-NEXT: psllw $8, %xmm0
; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm2
-; SSE41-NEXT: pand %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmaddubsw %xmm2, %xmm5
; SSE41-NEXT: pand %xmm4, %xmm5
@@ -609,7 +608,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX2-LABEL: mul_v32i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1
@@ -621,7 +620,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX512F-LABEL: mul_v32i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm1, %ymm2, %ymm3
+; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
; AVX512F-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3
; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm3
; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1
@@ -902,37 +901,34 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: movdqa %xmm8, %xmm9
-; SSE41-NEXT: pand %xmm4, %xmm9
+; SSE41-NEXT: pandn %xmm4, %xmm9
+; SSE41-NEXT: pand %xmm8, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm10
-; SSE41-NEXT: pmaddubsw %xmm9, %xmm10
+; SSE41-NEXT: pmaddubsw %xmm4, %xmm10
; SSE41-NEXT: pand %xmm8, %xmm10
-; SSE41-NEXT: movdqa %xmm8, %xmm9
-; SSE41-NEXT: pandn %xmm4, %xmm9
; SSE41-NEXT: pmaddubsw %xmm9, %xmm0
; SSE41-NEXT: psllw $8, %xmm0
; SSE41-NEXT: por %xmm10, %xmm0
; SSE41-NEXT: movdqa %xmm8, %xmm4
-; SSE41-NEXT: pand %xmm5, %xmm4
+; SSE41-NEXT: pandn %xmm5, %xmm4
+; SSE41-NEXT: pand %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm9
-; SSE41-NEXT: pmaddubsw %xmm4, %xmm9
+; SSE41-NEXT: pmaddubsw %xmm5, %xmm9
; SSE41-NEXT: pand %xmm8, %xmm9
-; SSE41-NEXT: movdqa %xmm8, %xmm4
-; SSE41-NEXT: pandn %xmm5, %xmm4
; SSE41-NEXT: pmaddubsw %xmm4, %xmm1
; SSE41-NEXT: psllw $8, %xmm1
; SSE41-NEXT: por %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm8, %xmm4
-; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pandn %xmm6, %xmm4
+; SSE41-NEXT: pand %xmm8, %xmm6
; SSE41-NEXT: movdqa %xmm2, %xmm5
-; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
+; SSE41-NEXT: pmaddubsw %xmm6, %xmm5
; SSE41-NEXT: pand %xmm8, %xmm5
-; SSE41-NEXT: movdqa %xmm8, %xmm4
-; SSE41-NEXT: pandn %xmm6, %xmm4
; SSE41-NEXT: pmaddubsw %xmm4, %xmm2
; SSE41-NEXT: psllw $8, %xmm2
; SSE41-NEXT: por %xmm5, %xmm2
-; SSE41-NEXT: movdqa %xmm8, %xmm4
-; SSE41-NEXT: pand %xmm7, %xmm4
+; SSE41-NEXT: movdqa %xmm7, %xmm4
+; SSE41-NEXT: pand %xmm8, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: pmaddubsw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm8, %xmm5
@@ -945,14 +941,14 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; AVX2-LABEL: mul_v64i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm2, %ymm4, %ymm5
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm5
; AVX2-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5
; AVX2-NEXT: vpand %ymm4, %ymm5, %ymm5
; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX2-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsllw $8, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm5, %ymm0
-; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm2
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm2
; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpandn %ymm3, %ymm4, %ymm3
@@ -963,28 +959,28 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
;
; AVX512F-LABEL: mul_v64i8:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm5
; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm5, %ymm4
-; AVX512F-NEXT: vpand %ymm1, %ymm3, %ymm6
+; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm6
; AVX512F-NEXT: vpmaddubsw %ymm6, %ymm0, %ymm6
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512F-NEXT: vpandn %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpsllw $8, %ymm0, %ymm0
-; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm1
+; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm1
; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm5, %ymm1
; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm4 & zmm3)
+; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm4 & zmm2)
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm1, %zmm2, %zmm3
+; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3
; AVX512BW-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1
; AVX512BW-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
index c9bb3de..885b075 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
@@ -59,7 +59,7 @@ define <32 x i8> @test_mul_32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX256BW-LABEL: test_mul_32i8:
; AVX256BW: # %bb.0:
; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX256BW-NEXT: vpand %ymm1, %ymm2, %ymm3
+; AVX256BW-NEXT: vpand %ymm2, %ymm1, %ymm3
; AVX256BW-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3
; AVX256BW-NEXT: vpandn %ymm1, %ymm2, %ymm1
; AVX256BW-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index be8adf6..9656822 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -1671,12 +1671,11 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm8
+; SSE41-NEXT: pxor %xmm9, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002324991,9223372039002324991]
-; SSE41-NEXT: movdqa %xmm7, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [2147549183,2147549183,2147549183,2147549183]
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
@@ -1684,22 +1683,20 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSE41-NEXT: movapd {{.*#+}} xmm8 = [65535,65535]
; SSE41-NEXT: movapd %xmm8, %xmm10
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm10
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm7, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pxor %xmm9, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm11, %xmm0
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: movapd %xmm8, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm4
; SSE41-NEXT: packusdw %xmm10, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm7, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm9, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
@@ -2771,12 +2768,11 @@ define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm7
; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm9
+; SSE41-NEXT: pxor %xmm10, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259455,9223372039002259455]
-; SSE41-NEXT: movdqa %xmm8, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647,2147483647,2147483647]
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm11, %xmm0
@@ -2784,11 +2780,10 @@ define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41-NEXT: movapd {{.*#+}} xmm9 = [4294967295,4294967295]
; SSE41-NEXT: movapd %xmm9, %xmm11
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm11
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
-; SSE41-NEXT: movdqa %xmm8, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm12, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
@@ -2797,11 +2792,10 @@ define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm11[0,2]
; SSE41-NEXT: pmaxud %xmm3, %xmm7
; SSE41-NEXT: psubd %xmm3, %xmm7
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
-; SSE41-NEXT: movdqa %xmm8, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: pxor %xmm10, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpgtd %xmm3, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -2997,12 +2991,11 @@ define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm9
+; SSE41-NEXT: pxor %xmm10, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259455,9223372039002259455]
-; SSE41-NEXT: movdqa %xmm8, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [2147483647,2147483647,2147483647,2147483647]
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pcmpgtd %xmm11, %xmm0
@@ -3010,11 +3003,10 @@ define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41-NEXT: movapd {{.*#+}} xmm9 = [4294967295,4294967295]
; SSE41-NEXT: movapd %xmm9, %xmm11
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm11
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
-; SSE41-NEXT: movdqa %xmm8, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm3
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pcmpgtd %xmm12, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
@@ -3023,11 +3015,10 @@ define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm11[0,2]
; SSE41-NEXT: pmaxud %xmm3, %xmm6
; SSE41-NEXT: psubd %xmm3, %xmm6
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm10, %xmm0
-; SSE41-NEXT: movdqa %xmm8, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: pxor %xmm10, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pcmpgtd %xmm3, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/sat-add.ll b/llvm/test/CodeGen/X86/sat-add.ll
index 949902a..b12be7c 100644
--- a/llvm/test/CodeGen/X86/sat-add.ll
+++ b/llvm/test/CodeGen/X86/sat-add.ll
@@ -631,8 +631,8 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_min(<2 x i64> %x) {
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292117,9223372034707292117]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
index efe34c5..d3e4906 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -84,8 +84,8 @@ define <16 x i8> @PR50049(ptr %p1, ptr %p2) {
; SSE-NEXT: pshufb %xmm8, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pmaddubsw %xmm3, %xmm4
; SSE-NEXT: pand %xmm2, %xmm4
@@ -120,7 +120,7 @@ define <16 x i8> @PR50049(ptr %p1, ptr %p2) {
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpandn %xmm1, %xmm2, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index 5568604..0af5e9a 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -57,8 +57,8 @@ define <2 x i32> @trunc_packus_v2i64_v2i32(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovsxdq {{.*#+}} xmm4 = [2147483647,2147483647]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -175,8 +175,8 @@ define void @trunc_packus_v2i64_v2i32_store(<2 x i64> %a0, ptr %p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovsxdq {{.*#+}} xmm4 = [2147483647,2147483647]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -317,12 +317,12 @@ define <4 x i32> @trunc_packus_v4i64_v4i32(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [4294967295,4294967295]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovsxdq {{.*#+}} xmm6 = [2147483647,2147483647]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -330,8 +330,8 @@ define <4 x i32> @trunc_packus_v4i64_v4i32(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -584,35 +584,32 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(ptr %p0) "min-legal-vector-width"="25
; SSE41-NEXT: movdqa 48(%rdi), %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [4294967295,4294967295]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: pmovsxdq {{.*#+}} xmm6 = [2147483647,2147483647]
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm8, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm5
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm3, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -620,8 +617,8 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(ptr %p0) "min-legal-vector-width"="25
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -828,8 +825,8 @@ define <2 x i16> @trunc_packus_v2i64_v2i16(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147549183,2147549183]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -971,8 +968,8 @@ define void @trunc_packus_v2i64_v2i16_store(<2 x i64> %a0, ptr%p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147549183,2147549183]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -1143,12 +1140,12 @@ define <4 x i16> @trunc_packus_v4i64_v4i16(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [65535,65535]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147549183,2147549183]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -1156,8 +1153,8 @@ define <4 x i16> @trunc_packus_v4i64_v4i16(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -1333,12 +1330,12 @@ define void @trunc_packus_v4i64_v4i16_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [65535,65535]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147549183,2147549183]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -1346,8 +1343,8 @@ define void @trunc_packus_v4i64_v4i16_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -1583,35 +1580,32 @@ define <8 x i16> @trunc_packus_v8i64_v8i16(ptr %p0) "min-legal-vector-width"="25
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [65535,65535]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147549183,2147549183]
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm8, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm2, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -1619,8 +1613,8 @@ define <8 x i16> @trunc_packus_v8i64_v8i16(ptr %p0) "min-legal-vector-width"="25
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -2239,8 +2233,8 @@ define <2 x i8> @trunc_packus_v2i64_v2i8(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -2393,8 +2387,8 @@ define void @trunc_packus_v2i64_v2i8_store(<2 x i64> %a0, ptr%p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -2539,12 +2533,12 @@ define <4 x i8> @trunc_packus_v4i64_v4i8(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [255,255]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -2552,8 +2546,8 @@ define <4 x i8> @trunc_packus_v4i64_v4i8(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -2733,12 +2727,12 @@ define void @trunc_packus_v4i64_v4i8_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [255,255]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -2746,8 +2740,8 @@ define void @trunc_packus_v4i64_v4i8_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -2987,35 +2981,32 @@ define <8 x i8> @trunc_packus_v8i64_v8i8(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [255,255]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm8, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm2, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -3023,8 +3014,8 @@ define <8 x i8> @trunc_packus_v8i64_v8i8(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -3277,35 +3268,32 @@ define void @trunc_packus_v8i64_v8i8_store(ptr %p0, ptr%p1) "min-legal-vector-wi
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [255,255]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm2, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm8, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm3
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -3313,8 +3301,8 @@ define void @trunc_packus_v8i64_v8i8_store(ptr %p0, ptr%p1) "min-legal-vector-wi
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -3677,79 +3665,72 @@ define <16 x i8> @trunc_packus_v16i64_v16i8(ptr %p0) "min-legal-vector-width"="2
; SSE41-NEXT: movdqa 96(%rdi), %xmm4
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [255,255]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm9 = [2147483903,2147483903]
-; SSE41-NEXT: movdqa %xmm9, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm6, %xmm5
+; SSE41-NEXT: pxor %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm5
-; SSE41-NEXT: movdqa %xmm10, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT: movdqa %xmm10, %xmm6
+; SSE41-NEXT: pxor %xmm2, %xmm6
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm6, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm6
; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm6
-; SSE41-NEXT: movdqa %xmm12, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT: movdqa %xmm12, %xmm10
+; SSE41-NEXT: pxor %xmm2, %xmm10
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm10
; SSE41-NEXT: blendvpd %xmm0, %xmm12, %xmm10
-; SSE41-NEXT: movdqa %xmm11, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm12
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm12
+; SSE41-NEXT: movdqa %xmm11, %xmm12
+; SSE41-NEXT: pxor %xmm2, %xmm12
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm12, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm12
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm12, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm12
; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm12
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
+; SSE41-NEXT: movdqa %xmm8, %xmm11
+; SSE41-NEXT: pxor %xmm2, %xmm11
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm11, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm11
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm11, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
@@ -3757,8 +3738,8 @@ define <16 x i8> @trunc_packus_v16i64_v16i8(ptr %p0) "min-legal-vector-width"="2
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm11
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm8
; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
index d276a68..3c03c52 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -59,8 +59,8 @@ define <2 x i32> @trunc_ssat_v2i64_v2i32(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm4 = [4294967295,0,4294967295,0]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -182,8 +182,8 @@ define void @trunc_ssat_v2i64_v2i32_store(<2 x i64> %a0, ptr %p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm4 = [4294967295,0,4294967295,0]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -334,12 +334,12 @@ define <4 x i32> @trunc_ssat_v4i64_v4i32(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [2147483647,2147483647]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm6 = [4294967295,0,4294967295,0]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -347,8 +347,8 @@ define <4 x i32> @trunc_ssat_v4i64_v4i32(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -604,35 +604,32 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: movdqa 48(%rdi), %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [2147483647,2147483647]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm6 = [4294967295,0,4294967295,0]
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm8, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm5
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm3, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -640,8 +637,8 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -849,8 +846,8 @@ define <2 x i16> @trunc_ssat_v2i64_v2i16(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147516415,2147516415]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -983,8 +980,8 @@ define void @trunc_ssat_v2i64_v2i16_store(<2 x i64> %a0, ptr%p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147516415,2147516415]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -1149,12 +1146,12 @@ define <4 x i16> @trunc_ssat_v4i64_v4i16(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [32767,32767]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147516415,2147516415]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -1162,8 +1159,8 @@ define <4 x i16> @trunc_ssat_v4i64_v4i16(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -1333,12 +1330,12 @@ define void @trunc_ssat_v4i64_v4i16_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [32767,32767]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147516415,2147516415]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -1346,8 +1343,8 @@ define void @trunc_ssat_v4i64_v4i16_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -1579,35 +1576,32 @@ define <8 x i16> @trunc_ssat_v8i64_v8i16(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [32767,32767]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147516415,2147516415]
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm8, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm2, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -1615,8 +1609,8 @@ define <8 x i16> @trunc_ssat_v8i64_v8i16(ptr %p0) "min-legal-vector-width"="256"
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -2002,8 +1996,8 @@ define <2 x i8> @trunc_ssat_v2i64_v2i8(<2 x i64> %a0) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -2148,8 +2142,8 @@ define void @trunc_ssat_v2i64_v2i8_store(<2 x i64> %a0, ptr%p1) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
@@ -2288,12 +2282,12 @@ define <4 x i8> @trunc_ssat_v4i64_v4i8(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [127,127]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -2301,8 +2295,8 @@ define <4 x i8> @trunc_ssat_v4i64_v4i8(<4 x i64> %a0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -2476,12 +2470,12 @@ define void @trunc_ssat_v4i64_v4i8_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [127,127]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm6, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
@@ -2489,8 +2483,8 @@ define void @trunc_ssat_v4i64_v4i8_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
@@ -2726,35 +2720,32 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(ptr %p0) "min-legal-vector-width"="256" {
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [127,127]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm8, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm2, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -2762,8 +2753,8 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(ptr %p0) "min-legal-vector-width"="256" {
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -3022,35 +3013,32 @@ define void @trunc_ssat_v8i64_v8i8_store(ptr %p0, ptr%p1) "min-legal-vector-widt
; SSE41-NEXT: movdqa 48(%rdi), %xmm8
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [127,127]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm2, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm8, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm3
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
; SSE41-NEXT: por %xmm9, %xmm0
@@ -3058,8 +3046,8 @@ define void @trunc_ssat_v8i64_v8i8_store(ptr %p0, ptr%p1) "min-legal-vector-widt
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
+; SSE41-NEXT: movdqa %xmm0, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm7, %xmm0
@@ -3430,79 +3418,72 @@ define <16 x i8> @trunc_ssat_v16i64_v16i8(ptr %p0) "min-legal-vector-width"="256
; SSE41-NEXT: movdqa 96(%rdi), %xmm4
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [127,127]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm9 = [2147483775,2147483775]
-; SSE41-NEXT: movdqa %xmm9, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm3
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm4, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm6, %xmm5
+; SSE41-NEXT: pxor %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm5, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm5
-; SSE41-NEXT: movdqa %xmm10, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT: movdqa %xmm10, %xmm6
+; SSE41-NEXT: pxor %xmm2, %xmm6
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm6, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm6
; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm6
-; SSE41-NEXT: movdqa %xmm12, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT: movdqa %xmm12, %xmm10
+; SSE41-NEXT: pxor %xmm2, %xmm10
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm10
; SSE41-NEXT: blendvpd %xmm0, %xmm12, %xmm10
-; SSE41-NEXT: movdqa %xmm11, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm12
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm12
+; SSE41-NEXT: movdqa %xmm11, %xmm12
+; SSE41-NEXT: pxor %xmm2, %xmm12
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm12, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm12
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm12, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm1, %xmm12
; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm12
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
+; SSE41-NEXT: movdqa %xmm8, %xmm11
+; SSE41-NEXT: pxor %xmm2, %xmm11
; SSE41-NEXT: movdqa %xmm9, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm13
+; SSE41-NEXT: pcmpgtd %xmm11, %xmm13
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm11
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,0,2,2]
; SSE41-NEXT: pand %xmm11, %xmm0
; SSE41-NEXT: por %xmm13, %xmm0
@@ -3510,8 +3491,8 @@ define <16 x i8> @trunc_ssat_v16i64_v16i8(ptr %p0) "min-legal-vector-width"="256
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm11
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm9, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm8
; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
; SSE41-NEXT: pand %xmm8, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
index 4126616..c1d22dc 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
@@ -207,20 +207,20 @@ define <4 x i32> @trunc_usat_v4i64_v4i32(<4 x i64> %a0) {
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: pxor %xmm4, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259455,9223372039002259455]
-; SSE41-NEXT: movdqa %xmm5, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pxor %xmm4, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259455,9223372039002259455]
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm5
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483647,2147483647,2147483647,2147483647]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpgtd %xmm7, %xmm3
-; SSE41-NEXT: pand %xmm6, %xmm3
+; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pxor %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE41-NEXT: pand %xmm5, %xmm0
+; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [4294967295,4294967295]
; SSE41-NEXT: movapd {{.*#+}} xmm5 = [4294967295,429496729]
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
@@ -407,34 +407,31 @@ define <8 x i32> @trunc_usat_v8i64_v8i32(ptr %p0) {
; SSE41-NEXT: movdqa 48(%rdi), %xmm1
; SSE41-NEXT: movapd {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm9
+; SSE41-NEXT: pxor %xmm6, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259455,9223372039002259455]
-; SSE41-NEXT: movdqa %xmm5, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm9
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm9
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm8, %xmm1
+; SSE41-NEXT: pxor %xmm6, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm9[0,2]
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm6, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm8
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
; SSE41-NEXT: pand %xmm8, %xmm0
@@ -790,26 +787,25 @@ define <4 x i16> @trunc_usat_v4i64_v4i16(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: movapd {{.*#+}} xmm2 = [65535,65535]
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002324991,9223372039002324991]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm6
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002324991,9223372039002324991]
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183]
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: movapd %xmm2, %xmm7
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
; SSE41-NEXT: pxor %xmm3, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: packusdw %xmm7, %xmm2
+; SSE41-NEXT: packusdw %xmm6, %xmm2
; SSE41-NEXT: packusdw %xmm2, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
@@ -924,26 +920,25 @@ define void @trunc_usat_v4i64_v4i16_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [65535,65535]
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002324991,9223372039002324991]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm6
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002324991,9223372039002324991]
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147549183,2147549183,2147549183,2147549183]
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: movapd %xmm4, %xmm7
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm4, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
; SSE41-NEXT: pxor %xmm2, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pand %xmm6, %xmm3
+; SSE41-NEXT: pand %xmm7, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
-; SSE41-NEXT: packusdw %xmm7, %xmm4
+; SSE41-NEXT: packusdw %xmm6, %xmm4
; SSE41-NEXT: packusdw %xmm4, %xmm4
; SSE41-NEXT: movq %xmm4, (%rdi)
; SSE41-NEXT: retq
@@ -1094,34 +1089,31 @@ define <8 x i16> @trunc_usat_v8i64_v8i16(ptr %p0) {
; SSE41-NEXT: movdqa 48(%rdi), %xmm7
; SSE41-NEXT: movapd {{.*#+}} xmm3 = [65535,65535]
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm2, %xmm9
+; SSE41-NEXT: pxor %xmm6, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002324991,9223372039002324991]
-; SSE41-NEXT: movdqa %xmm5, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2147549183,2147549183,2147549183,2147549183]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm9
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm9
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm8, %xmm2
+; SSE41-NEXT: pxor %xmm6, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: packusdw %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm6, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm8
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
; SSE41-NEXT: pand %xmm8, %xmm0
@@ -1869,26 +1861,25 @@ define <4 x i8> @trunc_usat_v4i64_v4i8(<4 x i64> %a0) {
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: movapd {{.*#+}} xmm2 = [255,255]
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259711,9223372039002259711]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm6
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259711,9223372039002259711]
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [2147483903,2147483903,2147483903,2147483903]
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: movapd %xmm2, %xmm7
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
; SSE41-NEXT: pxor %xmm3, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: packusdw %xmm7, %xmm2
+; SSE41-NEXT: packusdw %xmm6, %xmm2
; SSE41-NEXT: packusdw %xmm2, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -2005,26 +1996,25 @@ define void @trunc_usat_v4i64_v4i8_store(<4 x i64> %a0, ptr%p1) {
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [255,255]
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259711,9223372039002259711]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm6
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259711,9223372039002259711]
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm6
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483903,2147483903,2147483903,2147483903]
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: movapd %xmm4, %xmm7
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm4, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
; SSE41-NEXT: pxor %xmm2, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pand %xmm6, %xmm3
+; SSE41-NEXT: pand %xmm7, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
-; SSE41-NEXT: packusdw %xmm7, %xmm4
+; SSE41-NEXT: packusdw %xmm6, %xmm4
; SSE41-NEXT: packusdw %xmm4, %xmm4
; SSE41-NEXT: packuswb %xmm4, %xmm4
; SSE41-NEXT: movd %xmm4, (%rdi)
@@ -2175,34 +2165,31 @@ define <8 x i8> @trunc_usat_v8i64_v8i8(ptr %p0) {
; SSE41-NEXT: movdqa 48(%rdi), %xmm7
; SSE41-NEXT: movapd {{.*#+}} xmm3 = [255,255]
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm2, %xmm9
+; SSE41-NEXT: pxor %xmm6, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259711,9223372039002259711]
-; SSE41-NEXT: movdqa %xmm5, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2147483903,2147483903,2147483903,2147483903]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm9
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm9
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm8, %xmm2
+; SSE41-NEXT: pxor %xmm6, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: packusdw %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm7, %xmm8
+; SSE41-NEXT: pxor %xmm6, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm8
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
; SSE41-NEXT: pand %xmm8, %xmm0
@@ -2360,34 +2347,31 @@ define void @trunc_usat_v8i64_v8i8_store(ptr %p0, ptr%p1) {
; SSE41-NEXT: movdqa 48(%rdi), %xmm6
; SSE41-NEXT: movapd {{.*#+}} xmm2 = [255,255]
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm7, %xmm9
+; SSE41-NEXT: pxor %xmm5, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259711,9223372039002259711]
-; SSE41-NEXT: movdqa %xmm4, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm9
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2147483903,2147483903,2147483903,2147483903]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm2, %xmm9
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm9
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm8, %xmm7
+; SSE41-NEXT: pxor %xmm5, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm7
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm7, %xmm0
; SSE41-NEXT: movapd %xmm2, %xmm7
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm7
; SSE41-NEXT: packusdw %xmm9, %xmm7
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm6, %xmm8
+; SSE41-NEXT: pxor %xmm5, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm8
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm9, %xmm0
; SSE41-NEXT: pand %xmm8, %xmm0
@@ -2602,44 +2586,40 @@ define <16 x i8> @trunc_usat_v16i64_v16i8(ptr %p0) {
; SSE41-NEXT: movdqa 48(%rdi), %xmm11
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [255,255]
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm2, %xmm13
+; SSE41-NEXT: pxor %xmm7, %xmm13
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259711,9223372039002259711]
-; SSE41-NEXT: movdqa %xmm6, %xmm13
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm13
-; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[0,0,2,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm13
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2147483903,2147483903,2147483903,2147483903]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm14, %xmm0
; SSE41-NEXT: pand %xmm13, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm13
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm13
-; SSE41-NEXT: movdqa %xmm12, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pxor %xmm7, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm14, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm12, %xmm2
; SSE41-NEXT: packusdw %xmm13, %xmm2
-; SSE41-NEXT: movdqa %xmm11, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm12
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm12
-; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm11, %xmm12
+; SSE41-NEXT: pxor %xmm7, %xmm12
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm12
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm13, %xmm0
; SSE41-NEXT: pand %xmm12, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm12
; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm12
-; SSE41-NEXT: movdqa %xmm10, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
-; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm10, %xmm11
+; SSE41-NEXT: pxor %xmm7, %xmm11
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm11
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm13, %xmm0
; SSE41-NEXT: pand %xmm11, %xmm0
@@ -2647,32 +2627,29 @@ define <16 x i8> @trunc_usat_v16i64_v16i8(ptr %p0) {
; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm11
; SSE41-NEXT: packusdw %xmm12, %xmm11
; SSE41-NEXT: packusdw %xmm11, %xmm2
-; SSE41-NEXT: movdqa %xmm9, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm9, %xmm10
+; SSE41-NEXT: pxor %xmm7, %xmm10
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm10
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm11, %xmm0
; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm10
; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm10
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm9
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm8, %xmm9
+; SSE41-NEXT: pxor %xmm7, %xmm9
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm9[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm9
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm11, %xmm0
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm9
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm9
; SSE41-NEXT: packusdw %xmm10, %xmm9
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm5, %xmm8
+; SSE41-NEXT: pxor %xmm7, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm8
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
; SSE41-NEXT: pand %xmm8, %xmm0
diff --git a/llvm/test/CodeGen/XCore/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/XCore/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..429a781
--- /dev/null
+++ b/llvm/test/CodeGen/XCore/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march xcore | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: bl main
+; CHECK-NEXT: .cc_bottom naked.function
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: entsp 2
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .cfi_offset 15, 0
+; CHECK-NEXT: stw r10, sp[1] # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset 10, -4
+; CHECK-NEXT: ldaw r10, sp[0]
+; CHECK-NEXT: .cfi_def_cfa_register 10
+; CHECK-NEXT: extsp 1
+; CHECK-NEXT: bl main
+; CHECK-NEXT: ldaw sp, sp[1]
+; CHECK-NEXT: .cc_bottom normal.function
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/CodeGen/Xtensa/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/Xtensa/naked-fn-with-frame-pointer.ll
new file mode 100644
index 0000000..020fcc4
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/naked-fn-with-frame-pointer.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march xtensa | FileCheck %s -check-prefixes=CHECK
+
+declare dso_local void @main()
+
+define dso_local void @naked() naked "frame-pointer"="all" {
+; CHECK-LABEL: naked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: l32r a8, {{\.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NEXT: callx0 a8
+ call void @main()
+ unreachable
+}
+
+define dso_local void @normal() "frame-pointer"="all" {
+; CHECK-LABEL: normal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: s32i a0, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a15, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset a0, -4
+; CHECK-NEXT: .cfi_offset a15, -8
+; CHECK-NEXT: or a15, a1, a1
+; CHECK-NEXT: .cfi_def_cfa_register a15
+; CHECK-NEXT: l32r a8, {{\.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NEXT: callx0 a8
+ call void @main()
+ unreachable
+}
diff --git a/llvm/test/TableGen/GlobalISelEmitter-implicit-defs.td b/llvm/test/TableGen/GlobalISelEmitter-implicit-defs.td
new file mode 100644
index 0000000..79af1a3
--- /dev/null
+++ b/llvm/test/TableGen/GlobalISelEmitter-implicit-defs.td
@@ -0,0 +1,12 @@
+// RUN: llvm-tblgen -gen-global-isel -warn-on-skipped-patterns -I %p/../../include -I %p/Common %s -o /dev/null 2>&1 < %s | FileCheck %s --implicit-check-not="Skipped pattern"
+
+include "llvm/Target/Target.td"
+include "GlobalISelEmitterCommon.td"
+
+// CHECK: Skipped pattern: Pattern defines a physical register
+let Uses = [B0], Defs = [B0] in
+def tst1 : I<(outs), (ins), [(set B0, (add B0, 1))]>;
+
+// CHECK: Skipped pattern: Src pattern result has 1 def(s) without the HasNoUse predicate set to true but Dst MI has no def
+let Uses = [B0] in
+def tst2 : I<(outs), (ins), [(set B0, (add B0, 1))]>;
diff --git a/llvm/test/ThinLTO/X86/memprof-icp.ll b/llvm/test/ThinLTO/X86/memprof-icp.ll
index 5c6d4e3..2e97679 100644
--- a/llvm/test/ThinLTO/X86/memprof-icp.ll
+++ b/llvm/test/ThinLTO/X86/memprof-icp.ll
@@ -69,24 +69,29 @@
; RUN: split-file %s %t
-; RUN: opt -thinlto-bc %t/main.ll >%t/main.o
-; RUN: opt -thinlto-bc %t/foo.ll >%t/foo.o
+;; For now explicitly turn on this handling, which is off by default.
+; RUN: opt -thinlto-bc %t/main.ll -enable-memprof-indirect-call-support=true >%t/main.o
+; RUN: opt -thinlto-bc %t/foo.ll -enable-memprof-indirect-call-support=true >%t/foo.o
;; Check that we get the synthesized callsite records. There should be 2, one
;; for each profiled target in the VP metadata. They will have the same stackIds
;; since the debug information for the callsite is the same.
; RUN: llvm-dis %t/foo.o -o - | FileCheck %s --check-prefix=CALLSITES
-; CALLSITES: gv: (name: "_Z3fooR2B0j", {{.*}} callsites: ((callee: ^{{[0-9]+}}, clones: (0), stackIds: (16345663650247127235)), (callee: ^{{[0-9]+}}, clones: (0), stackIds: (16345663650247127235)))
+; CALLSITES: gv: (name: "_Z3fooR2B0j", {{.*}} callsites: ((callee: ^{{[0-9]+}}, clones: (0), stackIds: (16345663650247127235)), (callee: ^{{[0-9]+}}, clones: (0), stackIds: (16345663650247127235))
;; Make sure that we don't get the synthesized callsite records if the
;; -enable-memprof-indirect-call-support flag is false.
-; RUN: opt -thinlto-bc %t/foo.ll -enable-memprof-indirect-call-support=false -o - \
-; RUN: | llvm-dis -o - | FileCheck %s --implicit-check-not callsites
+; RUN: opt -thinlto-bc %t/foo.ll -enable-memprof-indirect-call-support=false >%t/foo.noicp.o
+; RUN: llvm-dis %t/foo.noicp.o -o - | FileCheck %s --implicit-check-not "stackIds: (16345663650247127235)"
+;; Currently this should be off by default as well.
+; RUN: opt -thinlto-bc %t/foo.ll -o - | llvm-dis -o - | FileCheck %s --implicit-check-not "stackIds: (16345663650247127235)"
;; First perform in-process ThinLTO
; RUN: llvm-lto2 run %t/main.o %t/foo.o -enable-memprof-context-disambiguation \
+; RUN: -enable-memprof-indirect-call-support=true \
; RUN: -supports-hot-cold-new \
; RUN: -r=%t/foo.o,_Z3fooR2B0j,plx \
+; RUN: -r=%t/foo.o,_Z3xyzR2B0j, \
; RUN: -r=%t/main.o,_Z3fooR2B0j, \
; RUN: -r=%t/main.o,_Znwm, \
; RUN: -r=%t/main.o,_ZdlPvm, \
@@ -116,6 +121,7 @@
; RUN: -supports-hot-cold-new \
; RUN: -thinlto-distributed-indexes \
; RUN: -r=%t/foo.o,_Z3fooR2B0j,plx \
+; RUN: -r=%t/foo.o,_Z3xyzR2B0j, \
; RUN: -r=%t/main.o,_Z3fooR2B0j, \
; RUN: -r=%t/main.o,_Znwm, \
; RUN: -r=%t/main.o,_ZdlPvm, \
@@ -136,11 +142,42 @@
;; Run ThinLTO backend
; RUN: opt -import-all-index -passes=function-import,memprof-context-disambiguation,inline \
+; RUN: -enable-memprof-indirect-call-support=true \
; RUN: -summary-file=%t/foo.o.thinlto.bc -memprof-import-summary=%t/foo.o.thinlto.bc \
; RUN: -enable-import-metadata -stats -pass-remarks=. \
; RUN: %t/foo.o -S 2>&1 | FileCheck %s --check-prefix=IR \
; RUN: --check-prefix=STATS-BE-DISTRIB --check-prefix=REMARKS-FOO
+;; Retry with the ICP-disabled object file, and make sure we disable it again
+;; so we don't look for the synthesized callsite records when applying imports.
+;; We should not get any cloning.
+; RUN: llvm-lto2 run %t/main.o %t/foo.noicp.o -enable-memprof-context-disambiguation \
+; RUN: -enable-memprof-indirect-call-support=false \
+; RUN: -supports-hot-cold-new \
+; RUN: -r=%t/foo.noicp.o,_Z3fooR2B0j,plx \
+; RUN: -r=%t/foo.noicp.o,_Z3xyzR2B0j, \
+; RUN: -r=%t/main.o,_Z3fooR2B0j, \
+; RUN: -r=%t/main.o,_Znwm, \
+; RUN: -r=%t/main.o,_ZdlPvm, \
+; RUN: -r=%t/main.o,_Z8externalPi, \
+; RUN: -r=%t/main.o,main,plx \
+; RUN: -r=%t/main.o,_ZN2B03barEj,plx \
+; RUN: -r=%t/main.o,_ZN1B3barEj,plx \
+; RUN: -r=%t/main.o,_ZTV1B,plx \
+; RUN: -r=%t/main.o,_ZTVN10__cxxabiv120__si_class_type_infoE,plx \
+; RUN: -r=%t/main.o,_ZTS1B,plx \
+; RUN: -r=%t/main.o,_ZTVN10__cxxabiv117__class_type_infoE,plx \
+; RUN: -r=%t/main.o,_ZTS2B0,plx \
+; RUN: -r=%t/main.o,_ZTI2B0,plx \
+; RUN: -r=%t/main.o,_ZTI1B,plx \
+; RUN: -r=%t/main.o,_ZTV2B0,plx \
+; RUN: -thinlto-threads=1 \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=. -save-temps \
+; RUN: -o %t.noicp.out 2>&1 | FileCheck %s --implicit-check-not "created clone"
+
+; RUN: llvm-dis %t.noicp.out.2.4.opt.bc -o - | FileCheck %s --implicit-check-not "_Z3fooR2B0j.memprof"
+
; REMARKS-MAIN: call in clone main assigned to call function clone _Z3fooR2B0j.memprof.1
; REMARKS-MAIN: call in clone main assigned to call function clone _Z3fooR2B0j.memprof.1
; REMARKS-MAIN: created clone _ZN2B03barEj.memprof.1
@@ -215,15 +252,22 @@
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
+declare i32 @_Z3xyzR2B0j(ptr %b)
+
define i32 @_Z3fooR2B0j(ptr %b) {
entry:
%0 = load ptr, ptr %b, align 8
%call = tail call i32 %0(ptr null, i32 0), !prof !0, !callsite !1
+ ;; Add a dummy call to ensure that we have some callsite metadata,
+ ;; which triggers callsite record checking in the ThinLTO backend
+ ;; even with -enable-memprof-indirect-call-support=false.
+ %call2 = call i32 @_Z3xyzR2B0j(ptr null, i32 0), !callsite !2
ret i32 0
}
!0 = !{!"VP", i32 0, i64 4, i64 4445083295448962937, i64 2, i64 -2718743882639408571, i64 2}
!1 = !{i64 -2101080423462424381}
+!2 = !{i64 1234}
;--- main.ll
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/umax.ll b/llvm/test/Transforms/CorrelatedValuePropagation/umax.ll
new file mode 100644
index 0000000..4fca708
--- /dev/null
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/umax.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s
+
+target datalayout = "p:32:32"
+
+define i32 @infer_range_from_dom_equality(i32 %x, i32 %y) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_equality(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: ret i32 1
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+entry:
+ %cond = icmp eq i32 %x, %y
+ %sub = sub i32 %x, %y
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
+
+define i32 @infer_range_from_dom_equality_commuted1(i32 %x, i32 %y) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_equality_commuted1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[Y]], [[X]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: ret i32 1
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+entry:
+ %cond = icmp eq i32 %x, %y
+ %sub = sub i32 %y, %x
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
+
+define i32 @infer_range_from_dom_equality_commuted2(i32 %x, i32 %y) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_equality_commuted2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[Y]], [[X]]
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: ret i32 1
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+entry:
+ %cond = icmp eq i32 %y, %x
+ %sub = sub i32 %x, %y
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
+
+define i32 @infer_range_from_dom_equality_ptrdiff(ptr %x, ptr %y) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_equality_ptrdiff(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) {
+; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[X]], [[Y]]
+; CHECK-NEXT: [[XI:%.*]] = ptrtoint ptr [[X]] to i32
+; CHECK-NEXT: [[YI:%.*]] = ptrtoint ptr [[Y]] to i32
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[XI]], [[YI]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: ret i32 1
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %cond = icmp eq ptr %x, %y
+ %xi = ptrtoint ptr %x to i32
+ %yi = ptrtoint ptr %y to i32
+ %sub = sub i32 %xi, %yi
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
+
+; Negative tests
+
+define i32 @infer_range_from_dom_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_slt(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[MAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[SUB]], i32 1)
+; CHECK-NEXT: ret i32 [[MAX1]]
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: [[MAX2:%.*]] = call i32 @llvm.umax.i32(i32 [[SUB]], i32 1)
+; CHECK-NEXT: ret i32 [[MAX2]]
+;
+entry:
+ %cond = icmp slt i32 %x, %y
+ %sub = sub i32 %x, %y
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
+
+define i32 @infer_range_from_dom_equality_not_match(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: define range(i32 1, 0) i32 @infer_range_from_dom_equality_not_match(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], i32 [[Z:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X]], [[Z]]
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[MAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[SUB]], i32 1)
+; CHECK-NEXT: ret i32 [[MAX1]]
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: [[MAX2:%.*]] = call i32 @llvm.umax.i32(i32 [[SUB]], i32 1)
+; CHECK-NEXT: ret i32 [[MAX2]]
+;
+entry:
+ %cond = icmp eq i32 %x, %z
+ %sub = sub i32 %x, %y
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %max1 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max1
+
+if.else:
+ %max2 = call i32 @llvm.umax.i32(i32 %sub, i32 1)
+ ret i32 %max2
+}
diff --git a/llvm/test/Transforms/FunctionSpecialization/noinline.ll b/llvm/test/Transforms/FunctionSpecialization/noinline.ll
index 7357640..34a8ecb 100644
--- a/llvm/test/Transforms/FunctionSpecialization/noinline.ll
+++ b/llvm/test/Transforms/FunctionSpecialization/noinline.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S --passes="ipsccp<func-spec>" < %s | FileCheck %s
+; RUN: opt -S --passes="ipsccp<func-spec>" -funcspec-for-literal-constant=false < %s | FileCheck %s
define dso_local i32 @p0(i32 noundef %x) {
entry:
%add = add nsw i32 %x, 1
diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-preserve-addrspace.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-preserve-addrspace.ll
new file mode 100644
index 0000000..3f2f041
--- /dev/null
+++ b/llvm/test/Transforms/GlobalOpt/ctor-list-preserve-addrspace.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 5
+; RUN: opt -S -passes=globalopt < %s | FileCheck %s
+
+; Make sure the address space of global_ctors is preserved
+
+%ini = type { i32, ptr, ptr }
+
+@llvm.global_ctors = appending addrspace(1) global [1 x %ini] [%ini { i32 65534, ptr @ctor1, ptr null }]
+
+;.
+; CHECK: @llvm.global_ctors = appending addrspace(1) global [0 x %ini] zeroinitializer
+;.
+define void @ctor1() {
+; CHECK-LABEL: define void @ctor1() local_unnamed_addr {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
diff --git a/llvm/test/Transforms/InferFunctionAttrs/annotate.ll b/llvm/test/Transforms/InferFunctionAttrs/annotate.ll
index 7c33d47..8567cc0 100644
--- a/llvm/test/Transforms/InferFunctionAttrs/annotate.ll
+++ b/llvm/test/Transforms/InferFunctionAttrs/annotate.ll
@@ -643,6 +643,15 @@ declare float @log2f(float)
; CHECK: declare x86_fp80 @log2l(x86_fp80) [[NOFREE_NOUNWIND_WILLRETURN_WRITEONLY]]
declare x86_fp80 @log2l(x86_fp80)
+; CHECK: declare i32 @ilogb(double) [[NOFREE_NOUNWIND_WILLRETURN_WRITEONLY]]
+declare i32 @ilogb(double)
+
+; CHECK: declare i32 @ilogbf(float) [[NOFREE_NOUNWIND_WILLRETURN_WRITEONLY]]
+declare i32 @ilogbf(float)
+
+; CHECK: declare i32 @ilogbl(x86_fp80) [[NOFREE_NOUNWIND_WILLRETURN_WRITEONLY]]
+declare i32 @ilogbl(x86_fp80)
+
; CHECK: declare double @logb(double) [[NOFREE_NOUNWIND_WILLRETURN_WRITEONLY]]
declare double @logb(double)
diff --git a/llvm/test/Transforms/InstCombine/sink_instruction.ll b/llvm/test/Transforms/InstCombine/sink_instruction.ll
index c938002..dac4085 100644
--- a/llvm/test/Transforms/InstCombine/sink_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_instruction.ll
@@ -86,8 +86,8 @@ define i32 @test3(ptr nocapture readonly %P, i32 %i) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 [[I:%.*]], label [[SW_EPILOG:%.*]] [
-; CHECK-NEXT: i32 5, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB]]
+; CHECK-NEXT: i32 5, label [[SW_BB:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB]]
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
@@ -190,8 +190,8 @@ define i32 @test6(ptr nocapture readonly %P, i32 %i, i1 %cond) {
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: switch i32 [[I]], label [[SW_BB:%.*]] [
-; CHECK-NEXT: i32 5, label [[SW_EPILOG:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_EPILOG]]
+; CHECK-NEXT: i32 5, label [[SW_EPILOG:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_EPILOG]]
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: br label [[SW_EPILOG]]
@@ -272,3 +272,114 @@ abort:
call void @abort()
unreachable
}
+
+; Loads marked invariant can be sunk past potential memory writes.
+
+define i32 @invariant_load_metadata(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_metadata(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[BLOCK:%.*]], label [[END:%.*]]
+; CHECK: block:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load [[META0:![0-9]+]]
+; CHECK-NEXT: ret i32 [[V]]
+;
+entry:
+ %v = load i32, ptr %p, !invariant.load !0
+ br i1 %cond, label %block, label %end
+block:
+ call void @fn()
+ br label %end
+end:
+ ret i32 %v
+}
+
+; Loads not marked invariant cannot be sunk past potential memory writes.
+
+define i32 @invariant_load_neg(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_neg(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[BLOCK:%.*]], label [[END:%.*]]
+; CHECK: block:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret i32 [[V]]
+;
+entry:
+ %v = load i32, ptr %p
+ br i1 %cond, label %block, label %end
+block:
+ call void @fn()
+ br label %end
+end:
+ ret i32 %v
+}
+
+; Loads that aren't marked invariant but used in one branch
+; can be sunk to that branch.
+
+define void @invariant_load_use_in_br(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_use_in_br(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[TRUE_BR:%.*]], label [[FALSE_BR:%.*]]
+; CHECK: true.br:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: false.br:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: call void @fn(i32 [[VAL]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %p
+ br i1 %cond, label %true.br, label %false.br
+true.br:
+ call void @fn()
+ br label %exit
+false.br:
+ call void @fn(i32 %val)
+ br label %exit
+exit:
+ ret void
+}
+
+; Invariant loads marked with metadata can be sunk past calls.
+
+define void @invariant_load_metadata_call(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_metadata_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[TRUE_BR:%.*]], label [[FALSE_BR:%.*]]
+; CHECK: true.br:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: false.br:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load [[META0]]
+; CHECK-NEXT: call void @fn(i32 [[VAL]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %p, !invariant.load !0
+ call void @fn()
+ br i1 %cond, label %true.br, label %false.br
+true.br:
+ call void @fn()
+ br label %exit
+false.br:
+ call void @fn(i32 %val)
+ br label %exit
+exit:
+ ret void
+}
+
+declare void @fn()
+
+!0 = !{}
diff --git a/llvm/test/Transforms/JumpThreading/thread-debug-info.ll b/llvm/test/Transforms/JumpThreading/thread-debug-info.ll
index cd7b0b1..4727413 100644
--- a/llvm/test/Transforms/JumpThreading/thread-debug-info.ll
+++ b/llvm/test/Transforms/JumpThreading/thread-debug-info.ll
@@ -50,7 +50,7 @@ exit: ; preds = %bb.f4, %bb.f3, %bb.
ret void, !dbg !29
}
-; This is testing for debug value instrinsics outside of the threaded block pointing to a value
+; This is testing for debug value intrinsics outside of the threaded block pointing to a value
; inside to correctly take any new definitions.
define void @test2(i32 %cond1, i32 %cond2) !dbg !5 {
; CHECK: bb.f3
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 01fca39..7f325ce 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -732,20 +732,9 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; DEFAULT-LABEL: define void @multiple_exit_conditions(
; DEFAULT-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR2:[0-9]+]] {
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 32
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 257, [[TMP8]]
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DEFAULT-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 32
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 257, [[TMP3]]
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 257, [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP17:%.*]] = mul i64 [[N_VEC]], 8
-; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP17]]
-; DEFAULT-NEXT: [[IND_END1:%.*]] = mul i64 [[N_VEC]], 2
-; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 32
+; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 2048
; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
; DEFAULT: vector.body:
; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -753,39 +742,20 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP0]]
; DEFAULT-NEXT: [[TMP1:%.*]] = load i16, ptr [[SRC]], align 2
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[TMP1]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP9:%.*]] = or <vscale x 8 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
-; DEFAULT-NEXT: [[TMP10:%.*]] = or <vscale x 8 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
-; DEFAULT-NEXT: [[TMP11:%.*]] = or <vscale x 8 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
-; DEFAULT-NEXT: [[TMP12:%.*]] = or <vscale x 8 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
-; DEFAULT-NEXT: [[TMP13:%.*]] = uitofp <vscale x 8 x i16> [[TMP9]] to <vscale x 8 x double>
-; DEFAULT-NEXT: [[TMP14:%.*]] = uitofp <vscale x 8 x i16> [[TMP10]] to <vscale x 8 x double>
-; DEFAULT-NEXT: [[TMP15:%.*]] = uitofp <vscale x 8 x i16> [[TMP11]] to <vscale x 8 x double>
-; DEFAULT-NEXT: [[TMP16:%.*]] = uitofp <vscale x 8 x i16> [[TMP12]] to <vscale x 8 x double>
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[TMP1]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP2:%.*]] = or <8 x i16> [[BROADCAST_SPLAT]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+; DEFAULT-NEXT: [[TMP3:%.*]] = uitofp <8 x i16> [[TMP2]] to <8 x double>
; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i32 0
-; DEFAULT-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8
-; DEFAULT-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i64 [[TMP19]]
-; DEFAULT-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 16
-; DEFAULT-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i64 [[TMP22]]
-; DEFAULT-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 24
-; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i64 [[TMP25]]
-; DEFAULT-NEXT: store <vscale x 8 x double> [[TMP13]], ptr [[TMP4]], align 8
-; DEFAULT-NEXT: store <vscale x 8 x double> [[TMP14]], ptr [[TMP20]], align 8
-; DEFAULT-NEXT: store <vscale x 8 x double> [[TMP15]], ptr [[TMP23]], align 8
-; DEFAULT-NEXT: store <vscale x 8 x double> [[TMP16]], ptr [[TMP26]], align 8
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; DEFAULT-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; DEFAULT-NEXT: store <8 x double> [[TMP3]], ptr [[TMP4]], align 8
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DEFAULT-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; DEFAULT-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 257, [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; DEFAULT-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]]
; DEFAULT: scalar.ph:
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[DST]], [[ENTRY:%.*]] ]
-; DEFAULT-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; DEFAULT-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 512, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; DEFAULT-NEXT: br label [[LOOP:%.*]]
; DEFAULT: vector.scevcheck:
; DEFAULT-NEXT: unreachable
@@ -810,7 +780,7 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; PRED: vector.ph:
; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; PRED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; PRED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 257, [[TMP2]]
; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
@@ -819,31 +789,31 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]]
; PRED-NEXT: [[IND_END1:%.*]] = mul i64 [[N_VEC]], 2
; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; PRED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
+; PRED-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2
; PRED-NEXT: [[TMP8:%.*]] = sub i64 257, [[TMP7]]
; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 257, [[TMP7]]
; PRED-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 257)
+; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 257)
; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
; PRED: vector.body:
; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; PRED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; PRED-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 0
; PRED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]]
; PRED-NEXT: [[TMP12:%.*]] = load i16, ptr [[SRC]], align 2
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[TMP12]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-; PRED-NEXT: [[TMP13:%.*]] = or <vscale x 8 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
-; PRED-NEXT: [[TMP14:%.*]] = uitofp <vscale x 8 x i16> [[TMP13]] to <vscale x 8 x double>
+; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i16> poison, i16 [[TMP12]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+; PRED-NEXT: [[TMP13:%.*]] = or <vscale x 2 x i16> [[BROADCAST_SPLAT]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> poison, i16 1, i64 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+; PRED-NEXT: [[TMP14:%.*]] = uitofp <vscale x 2 x i16> [[TMP13]] to <vscale x 2 x double>
; PRED-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i32 0
-; PRED-NEXT: call void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double> [[TMP14]], ptr [[TMP15]], i32 8, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
+; PRED-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP14]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP10]])
-; PRED-NEXT: [[TMP16:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
-; PRED-NEXT: [[TMP17:%.*]] = extractelement <vscale x 8 x i1> [[TMP16]], i32 0
+; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP10]])
+; PRED-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
+; PRED-NEXT: [[TMP17:%.*]] = extractelement <vscale x 2 x i1> [[TMP16]], i32 0
; PRED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; PRED: middle.block:
; PRED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll
index 59da1e1..f28f77b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll
@@ -1,23 +1,23 @@
; REQUIRES: asserts
; RUN: opt -mtriple=aarch64 -mattr=+sve \
; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE16
+; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE4
; RUN: opt -mtriple=aarch64 -mattr=+sve -mcpu=generic \
; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE16
+; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE4
; RUN: opt -mtriple=aarch64 -mcpu=neoverse-v1 \
; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=NEOVERSE-V1,VF-VSCALE16
+; RUN: | FileCheck %s --check-prefixes=NEOVERSE-V1,VF-VSCALE4
; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 \
; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE16
+; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE4
; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 \
; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE16
+; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE4
; GENERIC: LV: Vector loop of width vscale x 2 costs: 3 (assuming a minimum vscale of 2).
; GENERIC: LV: Vector loop of width vscale x 4 costs: 1 (assuming a minimum vscale of 2).
@@ -29,7 +29,7 @@
; NEOVERSE-N2: LV: Vector loop of width vscale x 4 costs: 3 (assuming a minimum vscale of 1).
; VF-4: <4 x i32>
-; VF-VSCALE16: <vscale x 16 x i32>
+; VF-VSCALE4: <16 x i32>
define void @test0(ptr %a, ptr %b, ptr %c) #0 {
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll
index a84932a..e83eb72 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll
@@ -8,8 +8,8 @@
; (maximized bandwidth for i8 in the loop).
define void @test0(ptr %a, ptr %b, ptr %c) #0 {
; CHECK: LV: Checking a loop in 'test0'
-; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 16
-; CHECK_SCALABLE_ON: LV: Selecting VF: vscale x 16
+; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 4
+; CHECK_SCALABLE_ON: LV: Selecting VF: 16
; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF
; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16
; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 16
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index a4861ad..7d2fc348 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -145,7 +145,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i16 [[X:%.*]]) #[[ATTR1:[0-9]+]] {
; DEFAULT-NEXT: iter.check:
; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; DEFAULT-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP1]]
; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; DEFAULT: vector.memcheck:
@@ -155,72 +155,59 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; DEFAULT-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; DEFAULT: vector.main.loop.iter.check:
-; DEFAULT-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[TMP9]], 32
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 0, [[TMP3]]
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DEFAULT-NEXT: br i1 true, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 32
-; DEFAULT-NEXT: [[N_MOD_VF1:%.*]] = urem i64 0, [[TMP5]]
-; DEFAULT-NEXT: [[N_VEC1:%.*]] = sub i64 0, [[N_MOD_VF1]]
-; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 32
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i16> poison, i16 [[X]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = trunc <vscale x 16 x i16> [[BROADCAST_SPLAT]] to <vscale x 16 x i8>
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <16 x i16> poison, i16 [[X]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <16 x i16> [[BROADCAST_SPLATINSERT3]], <16 x i16> poison, <16 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP7:%.*]] = trunc <16 x i16> [[BROADCAST_SPLAT4]] to <16 x i8>
; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
; DEFAULT: vector.body:
; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; DEFAULT-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0
-; DEFAULT-NEXT: [[TMP14:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META5:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 16 x i64> poison, i64 [[TMP14]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 16 x i64> [[BROADCAST_SPLATINSERT2]], <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP11:%.*]] = trunc <vscale x 16 x i64> [[BROADCAST_SPLAT3]] to <vscale x 16 x i8>
-; DEFAULT-NEXT: [[TMP22:%.*]] = and <vscale x 16 x i8> [[TMP11]], [[TMP8]]
-; DEFAULT-NEXT: [[TMP13:%.*]] = and <vscale x 16 x i8> [[TMP11]], [[TMP8]]
+; DEFAULT-NEXT: [[TMP4:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META5:![0-9]+]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[TMP4]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP5:%.*]] = trunc <16 x i64> [[BROADCAST_SPLAT2]] to <16 x i8>
+; DEFAULT-NEXT: [[TMP8:%.*]] = and <16 x i8> [[TMP5]], [[TMP7]]
+; DEFAULT-NEXT: [[TMP9:%.*]] = and <16 x i8> [[TMP5]], [[TMP7]]
; DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP2]]
; DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP10]], i32 0
-; DEFAULT-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP23:%.*]] = mul i64 [[TMP16]], 16
-; DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP10]], i64 [[TMP23]]
-; DEFAULT-NEXT: store <vscale x 16 x i8> [[TMP22]], ptr [[TMP12]], align 1, !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
-; DEFAULT-NEXT: store <vscale x 16 x i8> [[TMP13]], ptr [[TMP24]], align 1, !alias.scope [[META8]], !noalias [[META5]]
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; DEFAULT-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC1]]
-; DEFAULT-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP10]], i32 16
+; DEFAULT-NEXT: store <16 x i8> [[TMP8]], ptr [[TMP12]], align 1, !alias.scope [[META8:![0-9]+]], !noalias [[META5]]
+; DEFAULT-NEXT: store <16 x i8> [[TMP9]], ptr [[TMP13]], align 1, !alias.scope [[META8]], !noalias [[META5]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; DEFAULT-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
+; DEFAULT-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N1:%.*]] = icmp eq i64 0, [[N_VEC1]]
-; DEFAULT-NEXT: br i1 [[CMP_N1]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
+; DEFAULT-NEXT: br i1 true, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; DEFAULT: vec.epilog.iter.check:
-; DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 0, [[N_VEC1]]
; DEFAULT-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP31:%.*]] = mul i64 [[TMP15]], 8
-; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP31]]
+; DEFAULT-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2
+; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP16]]
; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; DEFAULT: vec.epilog.ph:
-; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC1]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; DEFAULT-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 8
+; DEFAULT-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 2
; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 0, [[TMP18]]
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 0, [[N_MOD_VF]]
; DEFAULT-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 8
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[X]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 8 x i16> [[BROADCAST_SPLATINSERT6]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP32:%.*]] = trunc <vscale x 8 x i16> [[BROADCAST_SPLAT7]] to <vscale x 8 x i8>
+; DEFAULT-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 2 x i16> poison, i16 [[X]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 2 x i16> [[BROADCAST_SPLATINSERT6]], <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP24:%.*]] = trunc <vscale x 2 x i16> [[BROADCAST_SPLAT7]] to <vscale x 2 x i8>
; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; DEFAULT: vec.epilog.vector.body:
; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; DEFAULT-NEXT: [[TMP21:%.*]] = add i64 [[INDEX5]], 0
-; DEFAULT-NEXT: [[TMP33:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META11:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP33]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP29:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT10]] to <vscale x 8 x i8>
-; DEFAULT-NEXT: [[TMP30:%.*]] = and <vscale x 8 x i8> [[TMP29]], [[TMP32]]
+; DEFAULT-NEXT: [[TMP22:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP22]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP23:%.*]] = trunc <vscale x 2 x i64> [[BROADCAST_SPLAT]] to <vscale x 2 x i8>
+; DEFAULT-NEXT: [[TMP25:%.*]] = and <vscale x 2 x i8> [[TMP23]], [[TMP24]]
; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP21]]
; DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
-; DEFAULT-NEXT: store <vscale x 8 x i8> [[TMP30]], ptr [[TMP27]], align 1, !alias.scope [[META14:![0-9]+]], !noalias [[META11]]
+; DEFAULT-NEXT: store <vscale x 2 x i8> [[TMP25]], ptr [[TMP27]], align 1, !alias.scope [[META14:![0-9]+]], !noalias [[META11]]
; DEFAULT-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX5]], [[TMP20]]
; DEFAULT-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -228,7 +215,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 0, [[N_VEC]]
; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; DEFAULT: vec.epilog.scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC1]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
; DEFAULT-NEXT: br label [[LOOP:%.*]]
; DEFAULT: loop:
; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -247,10 +234,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED-LABEL: define void @trunc_store(
; PRED-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i16 [[X:%.*]]) #[[ATTR1:[0-9]+]] {
; PRED-NEXT: entry:
-; PRED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP7]], 16
-; PRED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP1]]
-; PRED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; PRED-NEXT: br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; PRED: vector.memcheck:
; PRED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 8
; PRED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP]]
@@ -258,35 +242,28 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; PRED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; PRED: vector.ph:
-; PRED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 0, [[TMP3]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 0, [[N_MOD_VF]]
-; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP4]], 16
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i16> poison, i16 [[X]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-; PRED-NEXT: [[TMP12:%.*]] = trunc <vscale x 16 x i16> [[BROADCAST_SPLAT]] to <vscale x 16 x i8>
+; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i16> poison, i16 [[X]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i16> [[BROADCAST_SPLATINSERT1]], <16 x i16> poison, <16 x i32> zeroinitializer
+; PRED-NEXT: [[TMP3:%.*]] = trunc <16 x i16> [[BROADCAST_SPLAT2]] to <16 x i8>
; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
; PRED: vector.body:
; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PRED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; PRED-NEXT: [[TMP8:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META4:![0-9]+]]
-; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i64> poison, i64 [[TMP8]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
-; PRED-NEXT: [[TMP9:%.*]] = trunc <vscale x 16 x i64> [[BROADCAST_SPLAT2]] to <vscale x 16 x i8>
-; PRED-NEXT: [[TMP10:%.*]] = and <vscale x 16 x i8> [[TMP9]], [[TMP12]]
+; PRED-NEXT: [[TMP1:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META4:![0-9]+]]
+; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TMP1]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
+; PRED-NEXT: [[TMP2:%.*]] = trunc <16 x i64> [[BROADCAST_SPLAT]] to <16 x i8>
+; PRED-NEXT: [[TMP4:%.*]] = and <16 x i8> [[TMP2]], [[TMP3]]
; PRED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP0]]
; PRED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 0
-; PRED-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP6]], align 1, !alias.scope [[META7:![0-9]+]], !noalias [[META4]]
-; PRED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; PRED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; PRED-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; PRED-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP6]], align 1, !alias.scope [[META7:![0-9]+]], !noalias [[META4]]
+; PRED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; PRED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
+; PRED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; PRED: middle.block:
-; PRED-NEXT: [[CMP_N:%.*]] = icmp eq i64 0, [[N_VEC]]
-; PRED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; PRED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; PRED: scalar.ph:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; PRED-NEXT: br label [[LOOP:%.*]]
; PRED: loop:
; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
index 6a7263d..0b3f28e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
@@ -19,19 +19,19 @@ target triple = "aarch64-unknown-linux-gnu"
; VPLANS-EMPTY:
; VPLANS-NEXT: vector.ph:
; VPLANS-NEXT: EMIT vp<[[NEWTC:%[0-9]+]]> = TC > VF ? TC - VF : 0 vp<[[TC]]>
-; VPLANS-NEXT: EMIT vp<[[VF:%[0-9]+]]> = VF * Part + ir<0>
-; VPLANS-NEXT: EMIT vp<[[LANEMASK_ENTRY:%[0-9]+]]> = active lane mask vp<[[VF]]>, vp<[[TC]]>
+; VPLANS-NEXT: EMIT vp<[[VF:%.+]]> = VF * Part + ir<0>
+; VPLANS-NEXT: EMIT vp<[[LANEMASK_ENTRY:%.+]]> = active lane mask vp<[[VF]]>, vp<[[TC]]>
; VPLANS-NEXT: Successor(s): vector loop
; VPLANS-EMPTY:
; VPLANS-NEXT: <x1> vector loop: {
; VPLANS-NEXT: vector.body:
; VPLANS-NEXT: EMIT vp<[[INDV:%[0-9]+]]> = CANONICAL-INDUCTION
-; VPLANS-NEXT: ACTIVE-LANE-MASK-PHI vp<[[LANEMASK_PHI:%[0-9]+]]> = phi vp<[[LANEMASK_ENTRY]]>, vp<[[LANEMASK_LOOP:%[0-9]+]]>
+; VPLANS-NEXT: ACTIVE-LANE-MASK-PHI vp<[[LANEMASK_PHI:%[0-9]+]]> = phi vp<[[LANEMASK_ENTRY]]>, vp<[[LANEMASK_LOOP:%.+]]>
; VPLANS-NEXT: vp<[[STEP:%[0-9]+]]> = SCALAR-STEPS vp<[[INDV]]>, ir<1>
; VPLANS-NEXT: CLONE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEP]]>
; VPLANS-NEXT: vp<[[VEC_PTR:%[0-9]+]]> = vector-pointer ir<%gep>
; VPLANS-NEXT: WIDEN store vp<[[VEC_PTR]]>, ir<%val>, vp<[[LANEMASK_PHI]]>
-; VPLANS-NEXT: EMIT vp<[[INDV_UPDATE:%[0-9]+]]> = add vp<[[INDV]]>, vp<[[VFxUF]]>
+; VPLANS-NEXT: EMIT vp<[[INDV_UPDATE:%.+]]> = add vp<[[INDV]]>, vp<[[VFxUF]]>
; VPLANS-NEXT: EMIT vp<[[INC:%[0-9]+]]> = VF * Part + vp<[[INDV]]>
; VPLANS-NEXT: EMIT vp<[[LANEMASK_LOOP]]> = active lane mask vp<[[INC]]>, vp<[[NEWTC]]>
; VPLANS-NEXT: EMIT vp<[[NOT:%[0-9]+]]> = not vp<[[LANEMASK_LOOP]]>
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index 04ac895..0c41477 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -313,68 +313,36 @@ for.exit:
define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 {
; CHECK-LABEL: define void @histogram_8bit(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: iter.check:
+; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 3
+; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP9]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
-; CHECK: vector.main.loop.iter.check:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -16
+; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
+; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TMP10:%.*]] = zext <vscale x 16 x i32> [[WIDE_LOAD]] to <vscale x 16 x i64>
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], <vscale x 16 x i64> [[TMP10]]
-; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> [[TMP20]], i8 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer))
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP6]]
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i8(<vscale x 4 x ptr> [[TMP7]], i8 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
-; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP11]], 3
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP12]]
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]]
-; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG8:%.*]] = mul nsw i64 [[TMP13]], -8
-; CHECK-NEXT: [[N_VEC3:%.*]] = and i64 [[N]], [[DOTNEG8]]
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP14]], 3
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
-; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[INDEX4]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x i32>, ptr [[TMP16]], align 4
-; CHECK-NEXT: [[TMP17:%.*]] = zext <vscale x 8 x i32> [[WIDE_LOAD5]] to <vscale x 8 x i64>
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], <vscale x 8 x i64> [[TMP17]]
-; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv8p0.i8(<vscale x 8 x ptr> [[TMP18]], i8 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer))
-; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
-; CHECK-NEXT: br i1 [[TMP19]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP11:![0-9]+]]
-; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
-; CHECK-NEXT: br i1 [[CMP_N7]], label [[FOR_EXIT]], label [[SCALAR_PH]]
-; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY2]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; CHECK-NEXT: [[GEP_INDICES:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_INDICES]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
@@ -384,7 +352,7 @@ define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N)
; CHECK-NEXT: store i8 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY2]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -425,7 +393,7 @@ define void @histogram_float(ptr noalias %buckets, ptr readonly %indices, i64 %N
; CHECK-NEXT: store float [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -468,7 +436,7 @@ define void @histogram_varying_increment(ptr noalias %buckets, ptr readonly %ind
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -526,7 +494,7 @@ define void @simple_histogram_user_interleave(ptr noalias %buckets, ptr readonly
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP21]], i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -544,7 +512,7 @@ define void @simple_histogram_user_interleave(ptr noalias %buckets, ptr readonly
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -596,7 +564,7 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 {
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP11]], i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -614,7 +582,7 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 {
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -666,7 +634,7 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP7]], i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -684,7 +652,7 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado
; CHECK-NEXT: store i32 [[INC]], ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -733,13 +701,13 @@ define void @simple_histogram_tailfold(ptr noalias %buckets, ptr readonly %indic
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 poison, label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -806,7 +774,7 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr %
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -827,7 +795,7 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr %
; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[IDX_ADDR]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -919,7 +887,7 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> [[TMP6]], i64 1, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -936,7 +904,7 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: store i64 [[INC]], ptr [[GEP_BUCKET]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll
index 691c743..dec3c28 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll
@@ -24,25 +24,25 @@ define void @zext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
-; CHECK-NEXT: [[TMP10:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i16>
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 16 x i16> [[TMP10]], trunc (<vscale x 16 x i32> shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 2, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer) to <vscale x 16 x i16>)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
+; CHECK-NEXT: [[TMP10:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i16>
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i16> [[TMP10]], trunc (<vscale x 8 x i32> shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 2, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer) to <vscale x 8 x i16>)
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 16 x i16> [[TMP11]], ptr [[TMP12]], align 2
+; CHECK-NEXT: store <vscale x 8 x i16> [[TMP11]], ptr [[TMP12]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -107,25 +107,25 @@ define void @sext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
-; CHECK-NEXT: [[TMP10:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i16>
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 16 x i16> [[TMP10]], trunc (<vscale x 16 x i32> shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 2, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer) to <vscale x 16 x i16>)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
+; CHECK-NEXT: [[TMP10:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i16>
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i16> [[TMP10]], trunc (<vscale x 8 x i32> shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 2, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer) to <vscale x 8 x i16>)
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 16 x i16> [[TMP11]], ptr [[TMP12]], align 2
+; CHECK-NEXT: store <vscale x 8 x i16> [[TMP11]], ptr [[TMP12]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
index a1a13f1..4a2f9d0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize,instsimplify -force-vector-interleave=1 -S | FileCheck %s --check-prefixes=WIDE
-; RUN: opt < %s -passes=loop-vectorize,instsimplify -force-vector-interleave=1 -vectorizer-maximize-bandwidth=false -vectorizer-maximize-bandwidth-for-vector-calls=false -S | FileCheck %s --check-prefixes=NARROW
+; RUN: opt < %s -passes=loop-vectorize,instsimplify -force-vector-interleave=1 -vectorizer-maximize-bandwidth-for-vector-calls=false -S | FileCheck %s --check-prefixes=NARROW
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
index 90c209c..1326751 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
@@ -37,7 +37,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: <x1> vector loop: {
; IF-EVL-INLOOP-NEXT: vector.body:
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
-; IF-EVL-INLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%[0-9]+]]>
+; IF-EVL-INLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-INLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi ir<%start>, ir<[[RDX_NEXT:%.+]]>
; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
; IF-EVL-INLOOP-NEXT: EMIT vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
@@ -48,7 +48,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + vp.reduce.add (ir<[[LD1]]>, vp<[[EVL]]>)
; IF-EVL-INLOOP-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
-; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
+; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-INLOOP-NEXT: No successors
; IF-EVL-INLOOP-NEXT: }
@@ -86,7 +86,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]>
; NO-VP-OUTLOOP-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]>
; NO-VP-OUTLOOP-NEXT: WIDEN ir<[[ADD:%.+]]> = add ir<[[LD1]]>, ir<[[RDX_PHI]]>
-; NO-VP-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
+; NO-VP-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
; NO-VP-OUTLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; NO-VP-OUTLOOP-NEXT: No successors
; NO-VP-OUTLOOP-NEXT: }
@@ -125,7 +125,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]>
; NO-VP-INLOOP-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]>
; NO-VP-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + reduce.add (ir<[[LD1]]>)
-; NO-VP-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
+; NO-VP-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
; NO-VP-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; NO-VP-INLOOP-NEXT: No successors
; NO-VP-INLOOP-NEXT: }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
index c14a8bc..706b6f8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
@@ -22,7 +22,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: <x1> vector loop: {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
-; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%[0-9]+]]>
+; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
; IF-EVL-NEXT: EMIT vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>
@@ -38,7 +38,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
-; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
+; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
; IF-EVL-NEXT: }
@@ -65,7 +65,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; NO-VP-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; NO-VP-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>
-; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
+; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%.+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
; NO-VP-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]>
; NO-VP-NEXT: No successors
; NO-VP-NEXT: }
@@ -110,7 +110,7 @@ define void @safe_dep(ptr %p) {
; CHECK-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr ir<%p>, ir<[[OFFSET]]>
; CHECK-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
; CHECK-NEXT: WIDEN store vp<[[PTR2]]>, ir<[[V]]>
-; CHECK-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
+; CHECK-NEXT: EMIT vp<[[IV_NEXT:%.+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-select-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-select-intrinsics.ll
index c26ab20..6d6cfb5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-select-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-select-intrinsics.ll
@@ -17,7 +17,7 @@
; IF-EVL: <x1> vector loop: {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
- ; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEX:%[0-9]+]]>
+ ; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEX:%.+]]>
; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
; IF-EVL-NEXT: EMIT vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>
@@ -36,7 +36,7 @@
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEX]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
- ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
+ ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
; IF-EVL-NEXT: }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll
index 9b49d44..1af03e74 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll
@@ -36,7 +36,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; IF-EVL-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[MASK]]>
-; IF-EVL-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
+; IF-EVL-NEXT: EMIT vp<[[IV_NEXT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
; IF-EVL-NEXT: }
@@ -63,7 +63,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; NO-VP-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; NO-VP-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>
-; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
+; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%.+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]>
; NO-VP-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]>
; NO-VP-NEXT: No successors
; NO-VP-NEXT: }
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll
index 5e4ea2c..9de675b 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-chains-vplan.ll
@@ -34,7 +34,7 @@ define void @test_chained_first_order_recurrences_1(ptr %ptr) {
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<[[RESUME_1:%.+]]> = extract-from-end ir<%for.1.next>, ir<1>
-; CHECK-NEXT: EMIT vp<[[RESUME_2:%.+]]> = extract-from-end vp<[[FOR1_SPLICE]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[RESUME_2:%.+]]>.1 = extract-from-end vp<[[FOR1_SPLICE]]>, ir<1>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1000>, vp<[[VTC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
@@ -44,11 +44,11 @@ define void @test_chained_first_order_recurrences_1(ptr %ptr) {
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph
; CHECK-NEXT: EMIT vp<[[RESUME_1_P:%.*]]> = resume-phi vp<[[RESUME_1]]>, ir<22>
-; CHECK-NEXT: EMIT vp<[[RESUME_2_P:%.*]]> = resume-phi vp<[[RESUME_2]]>, ir<33>
+; CHECK-NEXT: EMIT vp<[[RESUME_2_P:%.*]]>.1 = resume-phi vp<[[RESUME_2]]>.1, ir<33>
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: Live-out i16 %for.1 = vp<[[RESUME_1_P]]>
-; CHECK-NEXT: Live-out i16 %for.2 = vp<[[RESUME_2_P]]>
+; CHECK-NEXT: Live-out i16 %for.2 = vp<[[RESUME_2_P]]>.1
; CHECK-NEXT: }
;
entry:
@@ -105,8 +105,8 @@ define void @test_chained_first_order_recurrences_3(ptr %ptr) {
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<[[RESUME_1:%.+]]> = extract-from-end ir<%for.1.next>, ir<1>
-; CHECK-NEXT: EMIT vp<[[RESUME_2:%.+]]> = extract-from-end vp<[[FOR1_SPLICE]]>, ir<1>
-; CHECK-NEXT: EMIT vp<[[RESUME_3:%.+]]> = extract-from-end vp<[[FOR2_SPLICE]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[RESUME_2:%.+]]>.1 = extract-from-end vp<[[FOR1_SPLICE]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[RESUME_3:%.+]]>.2 = extract-from-end vp<[[FOR2_SPLICE]]>, ir<1>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1000>, vp<[[VTC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
@@ -116,13 +116,13 @@ define void @test_chained_first_order_recurrences_3(ptr %ptr) {
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph
; CHECK-NEXT: EMIT vp<[[RESUME_1_P:%.*]]> = resume-phi vp<[[RESUME_1]]>, ir<22>
-; CHECK-NEXT: EMIT vp<[[RESUME_2_P:%.*]]> = resume-phi vp<[[RESUME_2]]>, ir<33>
-; CHECK-NEXT: EMIT vp<[[RESUME_3_P:%.*]]> = resume-phi vp<[[RESUME_3]]>, ir<33>
+; CHECK-NEXT: EMIT vp<[[RESUME_2_P:%.*]]>.1 = resume-phi vp<[[RESUME_2]]>.1, ir<33>
+; CHECK-NEXT: EMIT vp<[[RESUME_3_P:%.*]]>.2 = resume-phi vp<[[RESUME_3]]>.2, ir<33>
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: Live-out i16 %for.1 = vp<[[RESUME_1_P]]>
-; CHECK-NEXT: Live-out i16 %for.2 = vp<[[RESUME_2_P]]>
-; CHECK-NEXT: Live-out i16 %for.3 = vp<[[RESUME_3_P]]>
+; CHECK-NEXT: Live-out i16 %for.2 = vp<[[RESUME_2_P]]>.1
+; CHECK-NEXT: Live-out i16 %for.3 = vp<[[RESUME_3_P]]>.2
; CHECK-NEXT: }
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-unused-interleave-group.ll b/llvm/test/Transforms/LoopVectorize/vplan-unused-interleave-group.ll
index 5ea2799..27d81de 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-unused-interleave-group.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-unused-interleave-group.ll
@@ -18,9 +18,9 @@ define void @test_unused_interleave(ptr %src, i32 %length) {
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
-; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION ir<0>, vp<%3>
-; CHECK-NEXT: EMIT vp<%3> = add nuw vp<%2>, vp<%0>
-; CHECK-NEXT: EMIT branch-on-count vp<%3>, vp<%1>
+; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%2>, vp<%0>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%1>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/remarks-insert-into-small-vector.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/remarks-insert-into-small-vector.ll
index 0961244..4788e1e 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/remarks-insert-into-small-vector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/remarks-insert-into-small-vector.ll
@@ -8,7 +8,7 @@
; YAML-NEXT: Function: test
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Stores SLP vectorized with cost '
-; YAML-NEXT: - Cost: '0'
+; YAML-NEXT: - Cost: '-2'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '9'
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll
index 995cd7c..a0cb52a 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll
@@ -8,7 +8,7 @@
; YAML: Function: test1
; YAML: Args:
; YAML: - String: 'Stores SLP vectorized with cost '
-; YAML: - Cost: '6'
+; YAML: - Cost: '4'
; YAML: - String: ' and with tree size '
; YAML: - TreeSize: '5'
@@ -47,7 +47,7 @@ declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
; YAML: Function: test2
; YAML: Args:
; YAML: - String: 'Stores SLP vectorized with cost '
-; YAML: - Cost: '16'
+; YAML: - Cost: '12'
; YAML: - String: ' and with tree size '
; YAML: - TreeSize: '5'
diff --git a/llvm/test/Transforms/SROA/fake-use-sroa.ll b/llvm/test/Transforms/SROA/fake-use-sroa.ll
index 9e92df1..42b0cbb 100644
--- a/llvm/test/Transforms/SROA/fake-use-sroa.ll
+++ b/llvm/test/Transforms/SROA/fake-use-sroa.ll
@@ -1,5 +1,5 @@
; RUN: opt -S -passes=sroa %s | FileCheck %s
-; With fake use instrinsics generated for small aggregates, check that when
+; With fake use intrinsics generated for small aggregates, check that when
; SROA slices the aggregate, we generate individual fake use intrinsics for
; the individual values.
diff --git a/llvm/test/Transforms/Sink/invariant-load.ll b/llvm/test/Transforms/Sink/invariant-load.ll
index 1aab4a96..c8fb119 100644
--- a/llvm/test/Transforms/Sink/invariant-load.ll
+++ b/llvm/test/Transforms/Sink/invariant-load.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=sink -S < %s | FileCheck %s
-; Loads marked invariant can be sunk across critical edges
+; Loads marked invariant can be sunk across critical edges.
define <4 x float> @invariant_load(ptr %in, i32 %s) {
; CHECK-LABEL: @invariant_load(
@@ -12,7 +12,7 @@ define <4 x float> @invariant_load(ptr %in, i32 %s) {
; CHECK-NEXT: [[Z:%.*]] = add i32 [[S]], 1
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[V:%.*]] = load <4 x float>, ptr [[IN:%.*]], align 16, !invariant.load !0
+; CHECK-NEXT: [[V:%.*]] = load <4 x float>, ptr [[IN:%.*]], align 16, !invariant.load [[META0:![0-9]+]]
; CHECK-NEXT: ret <4 x float> [[V]]
;
main_body:
@@ -26,4 +26,67 @@ end:
ret <4 x float> %v
}
+; Loads that aren't marked invariant but used in one branch
+; can be sunk to that branch.
+
+define void @invariant_load_use_in_br(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_use_in_br(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[TRUE_BR:%.*]], label [[FALSE_BR:%.*]]
+; CHECK: true.br:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: false.br:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: call void @fn(i32 [[VAL]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %p
+ br i1 %cond, label %true.br, label %false.br
+true.br:
+ call void @fn()
+ br label %exit
+false.br:
+ call void @fn(i32 %val)
+ br label %exit
+exit:
+ ret void
+}
+
+; TODO: Invariant loads marked with metadata can be sunk past calls.
+
+define void @invariant_load_metadata_call(ptr %p, i1 %cond) {
+; CHECK-LABEL: @invariant_load_metadata_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load [[META0]]
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[TRUE_BR:%.*]], label [[FALSE_BR:%.*]]
+; CHECK: true.br:
+; CHECK-NEXT: call void @fn()
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: false.br:
+; CHECK-NEXT: call void @fn(i32 [[VAL]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %val = load i32, ptr %p, !invariant.load !0
+ call void @fn()
+ br i1 %cond, label %true.br, label %false.br
+true.br:
+ call void @fn()
+ br label %exit
+false.br:
+ call void @fn(i32 %val)
+ br label %exit
+exit:
+ ret void
+}
+
+declare void @fn()
+
!0 = !{}
diff --git a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
index 3eb6d8b..aad5794 100644
--- a/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
+++ b/llvm/test/tools/llvm-tli-checker/ps4-tli-check.yaml
@@ -34,7 +34,7 @@
#
# CHECK: << Total TLI yes SDK no: 18
# CHECK: >> Total TLI no SDK yes: 0
-# CHECK: == Total TLI yes SDK yes: 256
+# CHECK: == Total TLI yes SDK yes: 259
#
# WRONG_DETAIL: << TLI yes SDK no : '_ZdaPv' aka operator delete[](void*)
# WRONG_DETAIL: >> TLI no SDK yes: '_ZdaPvj' aka operator delete[](void*, unsigned int)
@@ -48,14 +48,14 @@
# WRONG_DETAIL: << TLI yes SDK no : 'fminimum_numl'
# WRONG_SUMMARY: << Total TLI yes SDK no: 19{{$}}
# WRONG_SUMMARY: >> Total TLI no SDK yes: 1{{$}}
-# WRONG_SUMMARY: == Total TLI yes SDK yes: 255
+# WRONG_SUMMARY: == Total TLI yes SDK yes: 258
#
## The -COUNT suffix doesn't care if there are too many matches, so check
## the exact count first; the two directives should add up to that.
## Yes, this means additions to TLI will fail this test, but the argument
## to -COUNT can't be an expression.
-# AVAIL: TLI knows 507 symbols, 274 available
-# AVAIL-COUNT-274: {{^}} available
+# AVAIL: TLI knows 510 symbols, 277 available
+# AVAIL-COUNT-277: {{^}} available
# AVAIL-NOT: {{^}} available
# UNAVAIL-COUNT-233: not available
# UNAVAIL-NOT: not available
@@ -654,6 +654,18 @@ DynamicSymbols:
Type: STT_FUNC
Section: .text
Binding: STB_GLOBAL
+ - Name: ilogb
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ - Name: ilogbf
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ - Name: ilogbl
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
- Name: logb
Type: STT_FUNC
Section: .text
diff --git a/llvm/tools/llvm-readtapi/llvm-readtapi.cpp b/llvm/tools/llvm-readtapi/llvm-readtapi.cpp
index 6e42ed7..1d74010 100644
--- a/llvm/tools/llvm-readtapi/llvm-readtapi.cpp
+++ b/llvm/tools/llvm-readtapi/llvm-readtapi.cpp
@@ -325,8 +325,8 @@ static void stubifyDirectory(const StringRef InputPath, Context &Ctx) {
continue;
}
- auto itr = SymLinks.insert({LinkTarget.c_str(), std::vector<SymLink>()});
- itr.first->second.emplace_back(LinkSrc.str(), std::string(SymPath.str()));
+ SymLinks[LinkTarget.c_str()].emplace_back(LinkSrc.str(),
+ std::string(SymPath.str()));
continue;
}
diff --git a/llvm/unittests/ADT/STLExtrasTest.cpp b/llvm/unittests/ADT/STLExtrasTest.cpp
index ee8299c..406ff2b 100644
--- a/llvm/unittests/ADT/STLExtrasTest.cpp
+++ b/llvm/unittests/ADT/STLExtrasTest.cpp
@@ -504,6 +504,43 @@ TEST(STLExtrasTest, ConcatRange) {
EXPECT_EQ(Expected, Test);
}
+template <typename T> struct Iterator {
+ int i = 0;
+ T operator*() const { return i; }
+ Iterator &operator++() {
+ ++i;
+ return *this;
+ }
+ bool operator==(Iterator RHS) const { return i == RHS.i; }
+};
+
+template <typename T> struct RangeWithValueType {
+ int i;
+ RangeWithValueType(int i) : i(i) {}
+ Iterator<T> begin() { return Iterator<T>{0}; }
+ Iterator<T> end() { return Iterator<T>{i}; }
+};
+
+TEST(STLExtrasTest, ValueReturn) {
+ RangeWithValueType<int> R(1);
+ auto C = concat<int>(R, R);
+ auto I = C.begin();
+ ASSERT_NE(I, C.end());
+ static_assert(std::is_same_v<decltype((*I)), int>);
+ auto V = *I;
+ ASSERT_EQ(V, 0);
+}
+
+TEST(STLExtrasTest, ReferenceReturn) {
+ RangeWithValueType<const int&> R(1);
+ auto C = concat<const int>(R, R);
+ auto I = C.begin();
+ ASSERT_NE(I, C.end());
+ static_assert(std::is_same_v<decltype((*I)), const int &>);
+ auto V = *I;
+ ASSERT_EQ(V, 0);
+}
+
TEST(STLExtrasTest, PartitionAdaptor) {
std::vector<int> V = {1, 2, 3, 4, 5, 6, 7, 8};
diff --git a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
index 4975651..b4856b5 100644
--- a/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
+++ b/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -266,6 +266,9 @@ TEST_F(TargetLibraryInfoTest, ValidProto) {
"declare double @log2(double)\n"
"declare float @log2f(float)\n"
"declare x86_fp80 @log2l(x86_fp80)\n"
+ "declare i32 @ilogb(double)\n"
+ "declare i32 @ilogbf(float)\n"
+ "declare i32 @ilogbl(x86_fp80)\n"
"declare double @logb(double)\n"
"declare float @logbf(float)\n"
"declare x86_fp80 @logbl(x86_fp80)\n"
diff --git a/llvm/unittests/CodeGen/MFCommon.inc b/llvm/unittests/CodeGen/MFCommon.inc
index 5d5720c..749c578 100644
--- a/llvm/unittests/CodeGen/MFCommon.inc
+++ b/llvm/unittests/CodeGen/MFCommon.inc
@@ -14,7 +14,9 @@ public:
MachineBasicBlock &MBB) const override {}
void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const override {}
- bool hasFP(const MachineFunction &MF) const override { return false; }
+
+protected:
+ bool hasFPImpl(const MachineFunction &MF) const override { return false; }
};
static TargetRegisterClass *const BogusRegisterClasses[] = {nullptr};
diff --git a/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp b/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
index 3ce85f5..8fe74e3 100644
--- a/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
+++ b/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
@@ -487,7 +487,7 @@ TEST(RandomIRBuilderTest, findSourceAndSink) {
ASSERT_TRUE(DT.dominates(Insts[IP - 1], Sink));
}
}
-TEST(RandomIRBuilderTest, sinkToInstrinsic) {
+TEST(RandomIRBuilderTest, sinkToIntrinsic) {
const char *Source = "\n\
declare double @llvm.sqrt.f64(double %Val) \n\
declare void @llvm.ubsantrap(i8 immarg) cold noreturn nounwind \n\
diff --git a/llvm/unittests/Object/GOFFObjectFileTest.cpp b/llvm/unittests/Object/GOFFObjectFileTest.cpp
index 69f60d0..e2fbf81 100644
--- a/llvm/unittests/Object/GOFFObjectFileTest.cpp
+++ b/llvm/unittests/Object/GOFFObjectFileTest.cpp
@@ -507,72 +507,72 @@ TEST(GOFFObjectFileTest, TXTConstruct) {
char GOFFData[GOFF::RecordLength * 6] = {};
// HDR record.
- GOFFData[0] = 0x03;
- GOFFData[1] = 0xF0;
- GOFFData[50] = 0x01;
+ GOFFData[0] = (char)0x03;
+ GOFFData[1] = (char)0xF0;
+ GOFFData[50] = (char)0x01;
// ESD record.
- GOFFData[GOFF::RecordLength] = 0x03;
- GOFFData[GOFF::RecordLength + 7] = 0x01; // ESDID.
- GOFFData[GOFF::RecordLength + 71] = 0x05; // Size of symbol name.
- GOFFData[GOFF::RecordLength + 72] = 0xa5; // Symbol name is v.
- GOFFData[GOFF::RecordLength + 73] = 0x81; // Symbol name is a.
- GOFFData[GOFF::RecordLength + 74] = 0x99; // Symbol name is r.
- GOFFData[GOFF::RecordLength + 75] = 0x7b; // Symbol name is #.
- GOFFData[GOFF::RecordLength + 76] = 0x83; // Symbol name is c.
+ GOFFData[GOFF::RecordLength] = (char)0x03;
+ GOFFData[GOFF::RecordLength + 7] = (char)0x01; // ESDID.
+ GOFFData[GOFF::RecordLength + 71] = (char)0x05; // Size of symbol name.
+ GOFFData[GOFF::RecordLength + 72] = (char)0xa5; // Symbol name is v.
+ GOFFData[GOFF::RecordLength + 73] = (char)0x81; // Symbol name is a.
+ GOFFData[GOFF::RecordLength + 74] = (char)0x99; // Symbol name is r.
+ GOFFData[GOFF::RecordLength + 75] = (char)0x7b; // Symbol name is #.
+ GOFFData[GOFF::RecordLength + 76] = (char)0x83; // Symbol name is c.
// ESD record.
- GOFFData[GOFF::RecordLength * 2] = 0x03;
- GOFFData[GOFF::RecordLength * 2 + 3] = 0x01;
- GOFFData[GOFF::RecordLength * 2 + 7] = 0x02; // ESDID.
- GOFFData[GOFF::RecordLength * 2 + 11] = 0x01; // Parent ESDID.
- GOFFData[GOFF::RecordLength * 2 + 27] = 0x08; // Length.
- GOFFData[GOFF::RecordLength * 2 + 40] = 0x01; // Name Space ID.
- GOFFData[GOFF::RecordLength * 2 + 41] = 0x80;
- GOFFData[GOFF::RecordLength * 2 + 60] = 0x04; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 2 + 61] = 0x04; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 2 + 63] = 0x0a; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 2 + 66] = 0x03; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 2 + 71] = 0x08; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 2 + 72] = 0xc3; // Symbol name is c.
- GOFFData[GOFF::RecordLength * 2 + 73] = 0x6d; // Symbol name is _.
- GOFFData[GOFF::RecordLength * 2 + 74] = 0xc3; // Symbol name is c.
- GOFFData[GOFF::RecordLength * 2 + 75] = 0xd6; // Symbol name is o.
- GOFFData[GOFF::RecordLength * 2 + 76] = 0xc4; // Symbol name is D.
- GOFFData[GOFF::RecordLength * 2 + 77] = 0xc5; // Symbol name is E.
- GOFFData[GOFF::RecordLength * 2 + 78] = 0xf6; // Symbol name is 6.
- GOFFData[GOFF::RecordLength * 2 + 79] = 0xf4; // Symbol name is 4.
+ GOFFData[GOFF::RecordLength * 2] = (char)0x03;
+ GOFFData[GOFF::RecordLength * 2 + 3] = (char)0x01;
+ GOFFData[GOFF::RecordLength * 2 + 7] = (char)0x02; // ESDID.
+ GOFFData[GOFF::RecordLength * 2 + 11] = (char)0x01; // Parent ESDID.
+ GOFFData[GOFF::RecordLength * 2 + 27] = (char)0x08; // Length.
+ GOFFData[GOFF::RecordLength * 2 + 40] = (char)0x01; // Name Space ID.
+ GOFFData[GOFF::RecordLength * 2 + 41] = (char)0x80;
+ GOFFData[GOFF::RecordLength * 2 + 60] = (char)0x04; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 2 + 61] = (char)0x04; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 2 + 63] = (char)0x0a; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 2 + 66] = (char)0x03; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 2 + 71] = (char)0x08; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 2 + 72] = (char)0xc3; // Symbol name is c.
+ GOFFData[GOFF::RecordLength * 2 + 73] = (char)0x6d; // Symbol name is _.
+ GOFFData[GOFF::RecordLength * 2 + 74] = (char)0xc3; // Symbol name is c.
+ GOFFData[GOFF::RecordLength * 2 + 75] = (char)0xd6; // Symbol name is o.
+ GOFFData[GOFF::RecordLength * 2 + 76] = (char)0xc4; // Symbol name is D.
+ GOFFData[GOFF::RecordLength * 2 + 77] = (char)0xc5; // Symbol name is E.
+ GOFFData[GOFF::RecordLength * 2 + 78] = (char)0xf6; // Symbol name is 6.
+ GOFFData[GOFF::RecordLength * 2 + 79] = (char)0xf4; // Symbol name is 4.
// ESD record.
- GOFFData[GOFF::RecordLength * 3] = 0x03;
- GOFFData[GOFF::RecordLength * 3 + 3] = 0x02;
- GOFFData[GOFF::RecordLength * 3 + 7] = 0x03; // ESDID.
- GOFFData[GOFF::RecordLength * 3 + 11] = 0x02; // Parent ESDID.
- GOFFData[GOFF::RecordLength * 3 + 71] = 0x05; // Size of symbol name.
- GOFFData[GOFF::RecordLength * 3 + 72] = 0xa5; // Symbol name is v.
- GOFFData[GOFF::RecordLength * 3 + 73] = 0x81; // Symbol name is a.
- GOFFData[GOFF::RecordLength * 3 + 74] = 0x99; // Symbol name is r.
- GOFFData[GOFF::RecordLength * 3 + 75] = 0x7b; // Symbol name is #.
- GOFFData[GOFF::RecordLength * 3 + 76] = 0x83; // Symbol name is c.
+ GOFFData[GOFF::RecordLength * 3] = (char)0x03;
+ GOFFData[GOFF::RecordLength * 3 + 3] = (char)0x02;
+ GOFFData[GOFF::RecordLength * 3 + 7] = (char)0x03; // ESDID.
+ GOFFData[GOFF::RecordLength * 3 + 11] = (char)0x02; // Parent ESDID.
+ GOFFData[GOFF::RecordLength * 3 + 71] = (char)0x05; // Size of symbol name.
+ GOFFData[GOFF::RecordLength * 3 + 72] = (char)0xa5; // Symbol name is v.
+ GOFFData[GOFF::RecordLength * 3 + 73] = (char)0x81; // Symbol name is a.
+ GOFFData[GOFF::RecordLength * 3 + 74] = (char)0x99; // Symbol name is r.
+ GOFFData[GOFF::RecordLength * 3 + 75] = (char)0x7b; // Symbol name is #.
+ GOFFData[GOFF::RecordLength * 3 + 76] = (char)0x83; // Symbol name is c.
// TXT record.
- GOFFData[GOFF::RecordLength * 4] = 0x03;
- GOFFData[GOFF::RecordLength * 4 + 1] = 0x10;
- GOFFData[GOFF::RecordLength * 4 + 7] = 0x02;
- GOFFData[GOFF::RecordLength * 4 + 23] = 0x08; // Data Length.
- GOFFData[GOFF::RecordLength * 4 + 24] = 0x12;
- GOFFData[GOFF::RecordLength * 4 + 25] = 0x34;
- GOFFData[GOFF::RecordLength * 4 + 26] = 0x56;
- GOFFData[GOFF::RecordLength * 4 + 27] = 0x78;
- GOFFData[GOFF::RecordLength * 4 + 28] = 0x9a;
- GOFFData[GOFF::RecordLength * 4 + 29] = 0xbc;
- GOFFData[GOFF::RecordLength * 4 + 30] = 0xde;
- GOFFData[GOFF::RecordLength * 4 + 31] = 0xf0;
+ GOFFData[GOFF::RecordLength * 4] = (char)0x03;
+ GOFFData[GOFF::RecordLength * 4 + 1] = (char)0x10;
+ GOFFData[GOFF::RecordLength * 4 + 7] = (char)0x02;
+ GOFFData[GOFF::RecordLength * 4 + 23] = (char)0x08; // Data Length.
+ GOFFData[GOFF::RecordLength * 4 + 24] = (char)0x12;
+ GOFFData[GOFF::RecordLength * 4 + 25] = (char)0x34;
+ GOFFData[GOFF::RecordLength * 4 + 26] = (char)0x56;
+ GOFFData[GOFF::RecordLength * 4 + 27] = (char)0x78;
+ GOFFData[GOFF::RecordLength * 4 + 28] = (char)0x9a;
+ GOFFData[GOFF::RecordLength * 4 + 29] = (char)0xbc;
+ GOFFData[GOFF::RecordLength * 4 + 30] = (char)0xde;
+ GOFFData[GOFF::RecordLength * 4 + 31] = (char)0xf0;
// END record.
- GOFFData[GOFF::RecordLength * 5] = 0x03;
- GOFFData[GOFF::RecordLength * 5 + 1] = 0x40;
- GOFFData[GOFF::RecordLength * 5 + 11] = 0x06;
+ GOFFData[GOFF::RecordLength * 5] = (char)0x03;
+ GOFFData[GOFF::RecordLength * 5 + 1] = (char)0x40;
+ GOFFData[GOFF::RecordLength * 5 + 11] = (char)0x06;
StringRef Data(GOFFData, GOFF::RecordLength * 6);
diff --git a/llvm/unittests/SandboxIR/CMakeLists.txt b/llvm/unittests/SandboxIR/CMakeLists.txt
index 1e83bda..b20ef82 100644
--- a/llvm/unittests/SandboxIR/CMakeLists.txt
+++ b/llvm/unittests/SandboxIR/CMakeLists.txt
@@ -9,6 +9,7 @@ add_llvm_unittest(SandboxIRTests
IntrinsicInstTest.cpp
PassTest.cpp
RegionTest.cpp
+ OperatorTest.cpp
SandboxIRTest.cpp
TrackerTest.cpp
TypesTest.cpp
diff --git a/llvm/unittests/SandboxIR/OperatorTest.cpp b/llvm/unittests/SandboxIR/OperatorTest.cpp
new file mode 100644
index 0000000..b1e3244
--- /dev/null
+++ b/llvm/unittests/SandboxIR/OperatorTest.cpp
@@ -0,0 +1,141 @@
+//===- OperatorTest.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/SandboxIR/Operator.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/SandboxIR/Context.h"
+#include "llvm/SandboxIR/Function.h"
+#include "llvm/SandboxIR/Instruction.h"
+#include "llvm/SandboxIR/Module.h"
+#include "llvm/SandboxIR/Value.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+struct OperatorTest : public testing::Test {
+ LLVMContext C;
+ std::unique_ptr<Module> M;
+
+ void parseIR(LLVMContext &C, const char *IR) {
+ SMDiagnostic Err;
+ M = parseAssemblyString(IR, Err, C);
+ if (!M)
+ Err.print("OperatorTest", errs());
+ }
+ BasicBlock *getBasicBlockByName(Function &F, StringRef Name) {
+ for (BasicBlock &BB : F)
+ if (BB.getName() == Name)
+ return &BB;
+ llvm_unreachable("Expected to find basic block!");
+ }
+};
+
+TEST_F(OperatorTest, Operator) {
+ parseIR(C, R"IR(
+define void @foo(i8 %v1) {
+ %add0 = add i8 %v1, 42
+ %add1 = add nuw i8 %v1, 42
+ ret void
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *OperatorI0 = cast<sandboxir::Operator>(&*It++);
+ auto *OperatorI1 = cast<sandboxir::Operator>(&*It++);
+ EXPECT_FALSE(OperatorI0->hasPoisonGeneratingFlags());
+ EXPECT_TRUE(OperatorI1->hasPoisonGeneratingFlags());
+}
+
+TEST_F(OperatorTest, OverflowingBinaryOperator) {
+ parseIR(C, R"IR(
+define void @foo(i8 %v1) {
+ %add = add i8 %v1, 42
+ %addNSW = add nsw i8 %v1, 42
+ %addNUW = add nuw i8 %v1, 42
+ ret void
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *Add = cast<sandboxir::OverflowingBinaryOperator>(&*It++);
+ auto *AddNSW = cast<sandboxir::OverflowingBinaryOperator>(&*It++);
+ auto *AddNUW = cast<sandboxir::OverflowingBinaryOperator>(&*It++);
+ EXPECT_FALSE(Add->hasNoUnsignedWrap());
+ EXPECT_FALSE(Add->hasNoSignedWrap());
+ EXPECT_EQ(Add->getNoWrapKind(), llvm::OverflowingBinaryOperator::AnyWrap);
+
+ EXPECT_FALSE(AddNSW->hasNoUnsignedWrap());
+ EXPECT_TRUE(AddNSW->hasNoSignedWrap());
+ EXPECT_EQ(AddNSW->getNoWrapKind(),
+ llvm::OverflowingBinaryOperator::NoSignedWrap);
+
+ EXPECT_TRUE(AddNUW->hasNoUnsignedWrap());
+ EXPECT_FALSE(AddNUW->hasNoSignedWrap());
+ EXPECT_EQ(AddNUW->getNoWrapKind(),
+ llvm::OverflowingBinaryOperator::NoUnsignedWrap);
+}
+
+TEST_F(OperatorTest, FPMathOperator) {
+ parseIR(C, R"IR(
+define void @foo(float %v1, double %v2) {
+ %fadd = fadd float %v1, 42.0
+ %Fast = fadd fast float %v1, 42.0
+ %Reassoc = fmul reassoc float %v1, 42.0
+ %NNAN = fmul nnan float %v1, 42.0
+ %NINF = fmul ninf float %v1, 42.0
+ %NSZ = fmul nsz float %v1, 42.0
+ %ARCP = fmul arcp float %v1, 42.0
+ %CONTRACT = fmul contract float %v1, 42.0
+ %AFN = fmul afn double %v2, 42.0
+ ret void
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ auto *LLVMBB = &*LLVMF->begin();
+ auto LLVMIt = LLVMBB->begin();
+
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto TermIt = BB->getTerminator()->getIterator();
+ while (It != TermIt) {
+ auto *FPM = cast<sandboxir::FPMathOperator>(&*It++);
+ auto *LLVMFPM = cast<llvm::FPMathOperator>(&*LLVMIt++);
+ EXPECT_EQ(FPM->isFast(), LLVMFPM->isFast());
+ EXPECT_EQ(FPM->hasAllowReassoc(), LLVMFPM->hasAllowReassoc());
+ EXPECT_EQ(FPM->hasNoNaNs(), LLVMFPM->hasNoNaNs());
+ EXPECT_EQ(FPM->hasNoInfs(), LLVMFPM->hasNoInfs());
+ EXPECT_EQ(FPM->hasNoSignedZeros(), LLVMFPM->hasNoSignedZeros());
+ EXPECT_EQ(FPM->hasAllowReciprocal(), LLVMFPM->hasAllowReciprocal());
+ EXPECT_EQ(FPM->hasAllowContract(), LLVMFPM->hasAllowContract());
+ EXPECT_EQ(FPM->hasApproxFunc(), LLVMFPM->hasApproxFunc());
+
+ // There doesn't seem to be an operator== for FastMathFlags so let's do a
+ // string comparison instead.
+ std::string Str1;
+ raw_string_ostream SS1(Str1);
+ std::string Str2;
+ raw_string_ostream SS2(Str2);
+ FPM->getFastMathFlags().print(SS1);
+ LLVMFPM->getFastMathFlags().print(SS2);
+ EXPECT_EQ(Str1, Str2);
+
+ EXPECT_EQ(FPM->getFPAccuracy(), LLVMFPM->getFPAccuracy());
+ EXPECT_EQ(
+ sandboxir::FPMathOperator::isSupportedFloatingPointType(FPM->getType()),
+ llvm::FPMathOperator::isSupportedFloatingPointType(LLVMFPM->getType()));
+ }
+}
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
index 4926afb..00a3c73 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
@@ -133,8 +133,8 @@ compound=true
N2 -> N4 [ label="" ltail=cluster_N3]
N4 [label =
"middle.block:\l" +
- " EMIT vp\<%1\> = icmp eq ir\<%N\>, vp\<%0\>\l" +
- " EMIT branch-on-cond vp\<%1\>\l" +
+ " EMIT vp\<%cmp.n\> = icmp eq ir\<%N\>, vp\<%0\>\l" +
+ " EMIT branch-on-cond vp\<%cmp.n\>\l" +
"Successor(s): ir-bb\<for.end\>, scalar.ph\l"
]
N4 -> N5 [ label="T"]
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index fe9621a..e3d9d01 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -1208,7 +1208,7 @@ ClassInfo *AsmMatcherInfo::getOperandClass(const Record *Rec, int SubOpIdx) {
"Record `" + Rec->getName() +
"' does not have a ParserMatchClass!\n");
- if (DefInit *DI = dyn_cast<DefInit>(R->getValue())) {
+ if (const DefInit *DI = dyn_cast<DefInit>(R->getValue())) {
const Record *MatchClass = DI->getDef();
if (ClassInfo *CI = AsmOperandClasses[MatchClass])
return CI;
@@ -1349,12 +1349,12 @@ void AsmMatcherInfo::buildRegisterClasses(
} else
CI->ValueName = CI->ValueName + "," + RC.getName();
- Init *DiagnosticType = Def->getValueInit("DiagnosticType");
- if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
+ const Init *DiagnosticType = Def->getValueInit("DiagnosticType");
+ if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
CI->DiagnosticType = std::string(SI->getValue());
- Init *DiagnosticString = Def->getValueInit("DiagnosticString");
- if (StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
+ const Init *DiagnosticString = Def->getValueInit("DiagnosticString");
+ if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
CI->DiagnosticString = std::string(SI->getValue());
// If we have a diagnostic string but the diagnostic type is not specified
@@ -1398,9 +1398,9 @@ void AsmMatcherInfo::buildOperandClasses() {
ClassInfo *CI = AsmOperandClasses[Rec];
CI->Kind = ClassInfo::UserClass0 + Index;
- ListInit *Supers = Rec->getValueAsListInit("SuperClasses");
- for (Init *I : Supers->getValues()) {
- DefInit *DI = dyn_cast<DefInit>(I);
+ const ListInit *Supers = Rec->getValueAsListInit("SuperClasses");
+ for (const Init *I : Supers->getValues()) {
+ const DefInit *DI = dyn_cast<DefInit>(I);
if (!DI) {
PrintError(Rec->getLoc(), "Invalid super class reference!");
continue;
@@ -1417,8 +1417,8 @@ void AsmMatcherInfo::buildOperandClasses() {
CI->ValueName = std::string(Rec->getName());
// Get or construct the predicate method name.
- Init *PMName = Rec->getValueInit("PredicateMethod");
- if (StringInit *SI = dyn_cast<StringInit>(PMName)) {
+ const Init *PMName = Rec->getValueInit("PredicateMethod");
+ if (const StringInit *SI = dyn_cast<StringInit>(PMName)) {
CI->PredicateMethod = std::string(SI->getValue());
} else {
assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
@@ -1426,8 +1426,8 @@ void AsmMatcherInfo::buildOperandClasses() {
}
// Get or construct the render method name.
- Init *RMName = Rec->getValueInit("RenderMethod");
- if (StringInit *SI = dyn_cast<StringInit>(RMName)) {
+ const Init *RMName = Rec->getValueInit("RenderMethod");
+ if (const StringInit *SI = dyn_cast<StringInit>(RMName)) {
CI->RenderMethod = std::string(SI->getValue());
} else {
assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
@@ -1435,29 +1435,29 @@ void AsmMatcherInfo::buildOperandClasses() {
}
// Get the parse method name or leave it as empty.
- Init *PRMName = Rec->getValueInit("ParserMethod");
- if (StringInit *SI = dyn_cast<StringInit>(PRMName))
+ const Init *PRMName = Rec->getValueInit("ParserMethod");
+ if (const StringInit *SI = dyn_cast<StringInit>(PRMName))
CI->ParserMethod = std::string(SI->getValue());
// Get the diagnostic type and string or leave them as empty.
- Init *DiagnosticType = Rec->getValueInit("DiagnosticType");
- if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
+ const Init *DiagnosticType = Rec->getValueInit("DiagnosticType");
+ if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
CI->DiagnosticType = std::string(SI->getValue());
- Init *DiagnosticString = Rec->getValueInit("DiagnosticString");
- if (StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
+ const Init *DiagnosticString = Rec->getValueInit("DiagnosticString");
+ if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
CI->DiagnosticString = std::string(SI->getValue());
// If we have a DiagnosticString, we need a DiagnosticType for use within
// the matcher.
if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty())
CI->DiagnosticType = CI->ClassName;
- Init *IsOptional = Rec->getValueInit("IsOptional");
- if (BitInit *BI = dyn_cast<BitInit>(IsOptional))
+ const Init *IsOptional = Rec->getValueInit("IsOptional");
+ if (const BitInit *BI = dyn_cast<BitInit>(IsOptional))
CI->IsOptional = BI->getValue();
// Get or construct the default method name.
- Init *DMName = Rec->getValueInit("DefaultMethod");
- if (StringInit *SI = dyn_cast<StringInit>(DMName)) {
+ const Init *DMName = Rec->getValueInit("DefaultMethod");
+ if (const StringInit *SI = dyn_cast<StringInit>(DMName)) {
CI->DefaultMethod = std::string(SI->getValue());
} else {
assert(isa<UnsetInit>(DMName) && "Unexpected DefaultMethod field!");
diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
index 83205b5..3f09564 100644
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -1031,9 +1031,9 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
bool IsOr = CombineType == "any_of";
// Change (any_of FeatureAll, (any_of ...)) to (any_of FeatureAll, ...).
if (IsOr && D->getNumArgs() == 2 && isa<DagInit>(D->getArg(1))) {
- DagInit *RHS = cast<DagInit>(D->getArg(1));
- SmallVector<Init *> Args{D->getArg(0)};
- SmallVector<StringInit *> ArgNames{D->getArgName(0)};
+ const DagInit *RHS = cast<DagInit>(D->getArg(1));
+ SmallVector<const Init *> Args{D->getArg(0)};
+ SmallVector<const StringInit *> ArgNames{D->getArgName(0)};
for (unsigned i = 0, e = RHS->getNumArgs(); i != e; ++i) {
Args.push_back(RHS->getArg(i));
ArgNames.push_back(RHS->getArgName(i));
diff --git a/llvm/utils/TableGen/Attributes.cpp b/llvm/utils/TableGen/Attributes.cpp
index ed00deb..1382753 100644
--- a/llvm/utils/TableGen/Attributes.cpp
+++ b/llvm/utils/TableGen/Attributes.cpp
@@ -122,7 +122,7 @@ void Attributes::emitAttributeProperties(raw_ostream &OS) {
bool AllowIntersectMin = KindName == "IntAttr";
for (auto *A : Records.getAllDerivedDefinitions(KindName)) {
OS << "0";
- for (Init *P : *A->getValueAsListInit("Properties")) {
+ for (const Init *P : *A->getValueAsListInit("Properties")) {
if (!AllowIntersectAnd &&
cast<DefInit>(P)->getDef()->getName() == "IntersectAnd")
PrintFatalError("'IntersectAnd' only compatible with 'EnumAttr'");
diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
index 2a246d6..18e0b8f 100644
--- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
+++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
@@ -324,7 +324,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(const Record *R,
IS.ParamTys.push_back(TypeList->getElementAsRecord(Idx));
// Parse the intrinsic properties.
- ListInit *PropList = R->getValueAsListInit("IntrProperties");
+ const ListInit *PropList = R->getValueAsListInit("IntrProperties");
for (unsigned i = 0, e = PropList->size(); i != e; ++i) {
const Record *Property = PropList->getElementAsRecord(i);
assert(Property->isSubClassOf("IntrinsicProperty") &&
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index 4d35677..be822c4 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -348,7 +348,7 @@ CodeEmitterGen::getInstructionCases(const Record *R,
void CodeEmitterGen::addInstructionCasesForEncoding(
const Record *R, const Record *EncodingDef, const CodeGenTarget &Target,
std::string &Case, std::string &BitOffsetCase) {
- BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");
+ const BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");
// Loop over all of the fields in the instruction, determining which are the
// operands to the instruction.
diff --git a/llvm/utils/TableGen/CodeGenMapTable.cpp b/llvm/utils/TableGen/CodeGenMapTable.cpp
index b599ee1..7876db6 100644
--- a/llvm/utils/TableGen/CodeGenMapTable.cpp
+++ b/llvm/utils/TableGen/CodeGenMapTable.cpp
@@ -128,7 +128,7 @@ public:
// Ex: ValueCols = [['true'],['false']] -- it results two columns in the
// table. First column requires all the instructions to have predSense
// set to 'true' and second column requires it to be 'false'.
- ListInit *ColValList = MapRec->getValueAsListInit("ValueCols");
+ const ListInit *ColValList = MapRec->getValueAsListInit("ValueCols");
// Each instruction map must specify at least one column for it to be valid.
if (ColValList->empty())
@@ -479,7 +479,7 @@ void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) {
OS << "// " << InstrMapDesc.getName() << "\nLLVM_READONLY\n";
OS << "int " << InstrMapDesc.getName() << "(uint16_t Opcode";
if (ValueCols.size() > 1) {
- for (Init *CF : ColFields->getValues()) {
+ for (const Init *CF : ColFields->getValues()) {
std::string ColName = CF->getAsUnquotedString();
OS << ", enum " << ColName << " in" << ColName;
}
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index 751ac3d..d2228c9 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
@@ -2639,7 +2639,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// If the operand has sub-operands, they may be provided by distinct
// child patterns, so attempt to match each sub-operand separately.
if (OperandNode->isSubClassOf("Operand")) {
- DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
+ const DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
if (unsigned NumArgs = MIOpInfo->getNumArgs()) {
// But don't do that if the whole operand is being provided by
// a single ComplexPattern-related Operand.
@@ -2786,11 +2786,11 @@ TreePattern::TreePattern(const Record *TheRec, const ListInit *RawPat,
bool isInput, CodeGenDAGPatterns &cdp)
: TheRecord(TheRec), CDP(cdp), isInputPattern(isInput), HasError(false),
Infer(*this) {
- for (Init *I : RawPat->getValues())
+ for (const Init *I : RawPat->getValues())
Trees.push_back(ParseTreePattern(I, ""));
}
-TreePattern::TreePattern(const Record *TheRec, DagInit *Pat, bool isInput,
+TreePattern::TreePattern(const Record *TheRec, const DagInit *Pat, bool isInput,
CodeGenDAGPatterns &cdp)
: TheRecord(TheRec), CDP(cdp), isInputPattern(isInput), HasError(false),
Infer(*this) {
@@ -2825,12 +2825,12 @@ void TreePattern::ComputeNamedNodes(TreePatternNode &N) {
ComputeNamedNodes(N.getChild(i));
}
-TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
+TreePatternNodePtr TreePattern::ParseTreePattern(const Init *TheInit,
StringRef OpName) {
RecordKeeper &RK = TheInit->getRecordKeeper();
// Here, we are creating new records (BitsInit->InitInit), so const_cast
// TheInit back to non-const pointer.
- if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
+ if (const DefInit *DI = dyn_cast<DefInit>(TheInit)) {
const Record *R = DI->getDef();
// Direct reference to a leaf DagNode or PatFrag? Turn it into a
@@ -2838,8 +2838,9 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
/// (foo GPR, imm) -> (foo GPR, (imm))
if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrags"))
return ParseTreePattern(
- DagInit::get(DI, nullptr,
- std::vector<std::pair<Init *, StringInit *>>()),
+ DagInit::get(
+ DI, nullptr,
+ std::vector<std::pair<const Init *, const StringInit *>>()),
OpName);
// Input argument?
@@ -2872,22 +2873,22 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
return makeIntrusiveRefCnt<TreePatternNode>(TheInit, 1);
}
- if (BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
+ if (const BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
// Turn this into an IntInit.
- Init *II = BI->convertInitializerTo(IntRecTy::get(RK));
+ const Init *II = BI->convertInitializerTo(IntRecTy::get(RK));
if (!II || !isa<IntInit>(II))
error("Bits value must be constants!");
return II ? ParseTreePattern(II, OpName) : nullptr;
}
- DagInit *Dag = dyn_cast<DagInit>(TheInit);
+ const DagInit *Dag = dyn_cast<DagInit>(TheInit);
if (!Dag) {
TheInit->print(errs());
error("Pattern has unexpected init kind!");
return nullptr;
}
- auto ParseCastOperand = [this](DagInit *Dag, StringRef OpName) {
+ auto ParseCastOperand = [this](const DagInit *Dag, StringRef OpName) {
if (Dag->getNumArgs() != 1)
error("Type cast only takes one operand!");
@@ -2897,7 +2898,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
return ParseTreePattern(Dag->getArg(0), Dag->getArgNameStr(0));
};
- if (ListInit *LI = dyn_cast<ListInit>(Dag->getOperator())) {
+ if (const ListInit *LI = dyn_cast<ListInit>(Dag->getOperator())) {
// If the operator is a list (of value types), then this must be "type cast"
// of a leaf node with multiple results.
TreePatternNodePtr New = ParseCastOperand(Dag, OpName);
@@ -2915,7 +2916,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
return New;
}
- DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
+ const DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef) {
error("Pattern has unexpected operator type!");
return nullptr;
@@ -3252,7 +3253,7 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
if (OutFrags != Frag->isSubClassOf("OutPatFrag"))
continue;
- ListInit *LI = Frag->getValueAsListInit("Fragments");
+ const ListInit *LI = Frag->getValueAsListInit("Fragments");
TreePattern *P = (PatternFragments[Frag] = std::make_unique<TreePattern>(
Frag, LI, !Frag->isSubClassOf("OutPatFrag"), *this))
.get();
@@ -3268,8 +3269,8 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
P->error("Cannot have unnamed 'node' values in pattern fragment!");
// Parse the operands list.
- DagInit *OpsList = Frag->getValueAsDag("Operands");
- DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
+ const DagInit *OpsList = Frag->getValueAsDag("Operands");
+ const DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
// Special cases: ops == outs == ins. Different names are used to
// improve readability.
if (!OpsOp || (OpsOp->getDef()->getName() != "ops" &&
@@ -3336,18 +3337,18 @@ void CodeGenDAGPatterns::ParseDefaultOperands() {
// Find some SDNode.
assert(!SDNodes.empty() && "No SDNodes parsed?");
- Init *SomeSDNode = SDNodes.begin()->first->getDefInit();
+ const Init *SomeSDNode = SDNodes.begin()->first->getDefInit();
for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) {
- DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
+ const DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
// Clone the DefaultInfo dag node, changing the operator from 'ops' to
// SomeSDnode so that we can parse this.
- std::vector<std::pair<Init *, StringInit *>> Ops;
+ std::vector<std::pair<const Init *, const StringInit *>> Ops;
for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
Ops.push_back(
std::pair(DefaultInfo->getArg(op), DefaultInfo->getArgName(op)));
- DagInit *DI = DagInit::get(SomeSDNode, nullptr, Ops);
+ const DagInit *DI = DagInit::get(SomeSDNode, nullptr, Ops);
// Create a TreePattern to parse this.
TreePattern P(DefaultOps[i], DI, false, *this);
@@ -3694,8 +3695,8 @@ static bool InferFromPattern(CodeGenInstruction &InstInfo,
/// hasNullFragReference - Return true if the DAG has any reference to the
/// null_frag operator.
-static bool hasNullFragReference(DagInit *DI) {
- DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
+static bool hasNullFragReference(const DagInit *DI) {
+ const DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
if (!OpDef)
return false;
const Record *Operator = OpDef->getDef();
@@ -3708,7 +3709,7 @@ static bool hasNullFragReference(DagInit *DI) {
if (auto Arg = dyn_cast<DefInit>(DI->getArg(i)))
if (Arg->getDef()->getName() == "null_frag")
return true;
- DagInit *Arg = dyn_cast<DagInit>(DI->getArg(i));
+ const DagInit *Arg = dyn_cast<DagInit>(DI->getArg(i));
if (Arg && hasNullFragReference(Arg))
return true;
}
@@ -3718,9 +3719,9 @@ static bool hasNullFragReference(DagInit *DI) {
/// hasNullFragReference - Return true if any DAG in the list references
/// the null_frag operator.
-static bool hasNullFragReference(ListInit *LI) {
- for (Init *I : LI->getValues()) {
- DagInit *DI = dyn_cast<DagInit>(I);
+static bool hasNullFragReference(const ListInit *LI) {
+ for (const Init *I : LI->getValues()) {
+ const DagInit *DI = dyn_cast<DagInit>(I);
assert(DI && "non-dag in an instruction Pattern list?!");
if (hasNullFragReference(DI))
return true;
@@ -3948,7 +3949,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
/// resolved instructions.
void CodeGenDAGPatterns::ParseInstructions() {
for (const Record *Instr : Records.getAllDerivedDefinitions("Instruction")) {
- ListInit *LI = nullptr;
+ const ListInit *LI = nullptr;
if (isa<ListInit>(Instr->getValueInit("Pattern")))
LI = Instr->getValueAsListInit("Pattern");
@@ -4310,7 +4311,7 @@ void CodeGenDAGPatterns::ParseOnePattern(
TreePattern Temp(Result.getRecord(), DstShared, false, *this);
Temp.InferAllTypes();
- ListInit *Preds = TheDef->getValueAsListInit("Predicates");
+ const ListInit *Preds = TheDef->getValueAsListInit("Predicates");
int Complexity = TheDef->getValueAsInt("AddedComplexity");
if (PatternRewriter)
@@ -4345,7 +4346,7 @@ void CodeGenDAGPatterns::ParseOnePattern(
void CodeGenDAGPatterns::ParsePatterns() {
for (const Record *CurPattern : Records.getAllDerivedDefinitions("Pattern")) {
- DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
+ const DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
// If the pattern references the null_frag, there's nothing to do.
if (hasNullFragReference(Tree))
@@ -4353,7 +4354,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
TreePattern Pattern(CurPattern, Tree, true, *this);
- ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
+ const ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
if (LI->empty())
continue; // no pattern.
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 1da7dea..f85753f 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -910,7 +910,7 @@ public:
/// current record.
TreePattern(const Record *TheRec, const ListInit *RawPat, bool isInput,
CodeGenDAGPatterns &ise);
- TreePattern(const Record *TheRec, DagInit *Pat, bool isInput,
+ TreePattern(const Record *TheRec, const DagInit *Pat, bool isInput,
CodeGenDAGPatterns &ise);
TreePattern(const Record *TheRec, TreePatternNodePtr Pat, bool isInput,
CodeGenDAGPatterns &ise);
@@ -975,7 +975,7 @@ public:
void dump() const;
private:
- TreePatternNodePtr ParseTreePattern(Init *DI, StringRef OpName);
+ TreePatternNodePtr ParseTreePattern(const Init *DI, StringRef OpName);
void ComputeNamedNodes();
void ComputeNamedNodes(TreePatternNode &N);
};
@@ -1055,7 +1055,7 @@ public:
/// processed to produce isel.
class PatternToMatch {
const Record *SrcRecord; // Originating Record for the pattern.
- ListInit *Predicates; // Top level predicate conditions to match.
+ const ListInit *Predicates; // Top level predicate conditions to match.
TreePatternNodePtr SrcPattern; // Source pattern to match.
TreePatternNodePtr DstPattern; // Resulting pattern.
std::vector<const Record *> Dstregs; // Physical register defs being matched.
@@ -1065,7 +1065,7 @@ class PatternToMatch {
unsigned ID; // Unique ID for the record.
public:
- PatternToMatch(const Record *srcrecord, ListInit *preds,
+ PatternToMatch(const Record *srcrecord, const ListInit *preds,
TreePatternNodePtr src, TreePatternNodePtr dst,
ArrayRef<const Record *> dstregs, int complexity, unsigned uid,
bool ignore, const Twine &hwmodefeatures = "")
@@ -1074,7 +1074,7 @@ public:
AddedComplexity(complexity), GISelShouldIgnore(ignore), ID(uid) {}
const Record *getSrcRecord() const { return SrcRecord; }
- ListInit *getPredicates() const { return Predicates; }
+ const ListInit *getPredicates() const { return Predicates; }
TreePatternNode &getSrcPattern() const { return *SrcPattern; }
TreePatternNodePtr getSrcPatternShared() const { return SrcPattern; }
TreePatternNode &getDstPattern() const { return *DstPattern; }
diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
index 69e0029..293ed76 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
@@ -67,7 +67,7 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
// Handle explicit registers.
if (ADI && ADI->getDef()->isSubClassOf("Register")) {
if (InstOpRec->isSubClassOf("OptionalDefOperand")) {
- DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
+ const DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
// The operand info should only have a single (register) entry. We
// want the register class of it.
InstOpRec = cast<DefInit>(DI->getArg(0))->getDef();
@@ -172,7 +172,7 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T)
AsmString = std::string(R->getValueAsString("AsmString"));
// Verify that the root of the result is an instruction.
- DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
+ const DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
if (!DI || !DI->getDef()->isSubClassOf("Instruction"))
PrintFatalError(R->getLoc(),
"result of inst alias should be an instruction");
diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.h b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
index 00680b0..f045b9f 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstAlias.h
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
@@ -39,7 +39,7 @@ public:
std::string AsmString;
/// Result - The result instruction.
- DagInit *Result;
+ const DagInit *Result;
/// ResultInst - The instruction generated by the alias (decoded from
/// Result).
diff --git a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
index 7fedc177..1c0ab59 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
@@ -27,9 +27,9 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
hasOptionalDef = false;
isVariadic = false;
- DagInit *OutDI = R->getValueAsDag("OutOperandList");
+ const DagInit *OutDI = R->getValueAsDag("OutOperandList");
- if (DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
+ if (const DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
if (Init->getDef()->getName() != "outs")
PrintFatalError(R->getLoc(),
R->getName() +
@@ -40,8 +40,8 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
NumDefs = OutDI->getNumArgs();
- DagInit *InDI = R->getValueAsDag("InOperandList");
- if (DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
+ const DagInit *InDI = R->getValueAsDag("InOperandList");
+ if (const DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
if (Init->getDef()->getName() != "ins")
PrintFatalError(R->getLoc(),
R->getName() +
@@ -56,7 +56,7 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
OperandList.reserve(e);
bool VariadicOuts = false;
for (unsigned i = 0; i != e; ++i) {
- Init *ArgInit;
+ const Init *ArgInit;
StringRef ArgName;
if (i < NumDefs) {
ArgInit = OutDI->getArg(i);
@@ -66,11 +66,11 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
ArgName = InDI->getArgNameStr(i - NumDefs);
}
- DagInit *SubArgDag = dyn_cast<DagInit>(ArgInit);
+ const DagInit *SubArgDag = dyn_cast<DagInit>(ArgInit);
if (SubArgDag)
ArgInit = SubArgDag->getOperator();
- DefInit *Arg = dyn_cast<DefInit>(ArgInit);
+ const DefInit *Arg = dyn_cast<DefInit>(ArgInit);
if (!Arg)
PrintFatalError(R->getLoc(), "Illegal operand for the '" + R->getName() +
"' instruction!");
@@ -81,7 +81,7 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
std::string OperandType = "OPERAND_UNKNOWN";
std::string OperandNamespace = "MCOI";
unsigned NumOps = 1;
- DagInit *MIOpInfo = nullptr;
+ const DagInit *MIOpInfo = nullptr;
if (Rec->isSubClassOf("RegisterOperand")) {
PrintMethod = std::string(Rec->getValueAsString("PrintMethod"));
OperandType = std::string(Rec->getValueAsString("OperandType"));
@@ -280,7 +280,7 @@ CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) {
}
// Find the suboperand number involved.
- DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
+ const DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
if (!MIOpInfo)
PrintFatalError(TheDef->getLoc(), TheDef->getName() +
": unknown suboperand name in '" +
@@ -581,11 +581,11 @@ std::string CodeGenInstruction::FlattenAsmStringVariants(StringRef Cur,
bool CodeGenInstruction::isOperandImpl(StringRef OpListName, unsigned i,
StringRef PropertyName) const {
- DagInit *ConstraintList = TheDef->getValueAsDag(OpListName);
+ const DagInit *ConstraintList = TheDef->getValueAsDag(OpListName);
if (!ConstraintList || i >= ConstraintList->getNumArgs())
return false;
- DefInit *Constraint = dyn_cast<DefInit>(ConstraintList->getArg(i));
+ const DefInit *Constraint = dyn_cast<DefInit>(ConstraintList->getArg(i));
if (!Constraint)
return false;
diff --git a/llvm/utils/TableGen/Common/CodeGenInstruction.h b/llvm/utils/TableGen/Common/CodeGenInstruction.h
index 18294b1..a799d02 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstruction.h
+++ b/llvm/utils/TableGen/Common/CodeGenInstruction.h
@@ -110,7 +110,7 @@ public:
/// MIOperandInfo - Default MI operand type. Note an operand may be made
/// up of multiple MI operands.
- DagInit *MIOperandInfo;
+ const DagInit *MIOperandInfo;
/// Constraint info for this operand. This operand can have pieces, so we
/// track constraint info for each.
@@ -118,7 +118,7 @@ public:
OperandInfo(const Record *R, const std::string &N, const std::string &PMN,
const std::string &OT, unsigned MION, unsigned MINO,
- DagInit *MIOI)
+ const DagInit *MIOI)
: Rec(R), Name(N), SubOpNames(MINO), PrinterMethodName(PMN),
EncoderMethodNames(MINO), OperandType(OT), MIOperandNo(MION),
MINumOperands(MINO), DoNotEncode(MINO), MIOperandInfo(MIOI),
diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
index b53492d..9e1ebf3 100644
--- a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
@@ -630,7 +630,7 @@ struct TupleExpander : SetTheory::Expander {
std::vector<const Record *> Indices =
Def->getValueAsListOfDefs("SubRegIndices");
unsigned Dim = Indices.size();
- ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
+ const ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
if (Dim != SubRegs->size())
PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
if (Dim < 2)
@@ -669,11 +669,11 @@ struct TupleExpander : SetTheory::Expander {
}
// Take the cost list of the first register in the tuple.
- ListInit *CostList = Proto->getValueAsListInit("CostPerUse");
- SmallVector<Init *, 2> CostPerUse;
+ const ListInit *CostList = Proto->getValueAsListInit("CostPerUse");
+ SmallVector<const Init *, 2> CostPerUse;
CostPerUse.insert(CostPerUse.end(), CostList->begin(), CostList->end());
- StringInit *AsmName = StringInit::get(RK, "");
+ const StringInit *AsmName = StringInit::get(RK, "");
if (!RegNames.empty()) {
if (RegNames.size() <= n)
PrintFatalError(Def->getLoc(),
@@ -776,7 +776,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
// Allocation order 0 is the full set. AltOrders provides others.
const SetTheory::RecVec *Elements = RegBank.getSets().expand(R);
- ListInit *AltOrders = R->getValueAsListInit("AltOrders");
+ const ListInit *AltOrders = R->getValueAsListInit("AltOrders");
Orders.resize(1 + AltOrders->size());
// Default allocation order always contains all registers.
@@ -808,7 +808,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
Namespace = R->getValueAsString("Namespace");
if (const RecordVal *RV = R->getValue("RegInfos"))
- if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
+ if (const DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
RSI = RegSizeInfoByHwMode(DI->getDef(), RegBank.getHwModes());
unsigned Size = R->getValueAsInt("Size");
assert((RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) &&
@@ -831,9 +831,9 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
GlobalPriority = R->getValueAsBit("GlobalPriority");
- BitsInit *TSF = R->getValueAsBitsInit("TSFlags");
+ const BitsInit *TSF = R->getValueAsBitsInit("TSFlags");
for (unsigned I = 0, E = TSF->getNumBits(); I != E; ++I) {
- BitInit *Bit = cast<BitInit>(TSF->getBit(I));
+ const BitInit *Bit = cast<BitInit>(TSF->getBit(I));
TSFlags |= uint8_t(Bit->getValue()) << I;
}
}
diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
index 9c37fbe..06d82da 100644
--- a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
@@ -86,8 +86,8 @@ struct InstRegexOp : public SetTheory::Operator {
auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);
- for (Init *Arg : Expr->getArgs()) {
- StringInit *SI = dyn_cast<StringInit>(Arg);
+ for (const Init *Arg : Expr->getArgs()) {
+ const StringInit *SI = dyn_cast<StringInit>(Arg);
if (!SI)
PrintFatalError(Loc, "instregex requires pattern string: " +
Expr->getAsString());
@@ -1828,13 +1828,14 @@ void CodeGenSchedModels::collectRegisterFiles() {
ConstRecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
- ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
+ const ListInit *MoveElimInfo =
+ RF->getValueAsListInit("AllowMoveElimination");
for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;
bool AllowMoveElim = false;
if (MoveElimInfo->size() > I) {
- BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
+ const BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
AllowMoveElim = Val->getValue();
}
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
index 9883cf5..b358518 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
@@ -361,16 +361,16 @@ void CodeGenTarget::reverseBitsForLittleEndianEncoding() {
R->getValueAsBit("isPseudo"))
continue;
- BitsInit *BI = R->getValueAsBitsInit("Inst");
+ const BitsInit *BI = R->getValueAsBitsInit("Inst");
unsigned numBits = BI->getNumBits();
- SmallVector<Init *, 16> NewBits(numBits);
+ SmallVector<const Init *, 16> NewBits(numBits);
for (unsigned bit = 0, end = numBits / 2; bit != end; ++bit) {
unsigned bitSwapIdx = numBits - bit - 1;
- Init *OrigBit = BI->getBit(bit);
- Init *BitSwap = BI->getBit(bitSwapIdx);
+ const Init *OrigBit = BI->getBit(bit);
+ const Init *BitSwap = BI->getBit(bitSwapIdx);
NewBits[bit] = BitSwap;
NewBits[bitSwapIdx] = OrigBit;
}
@@ -380,7 +380,7 @@ void CodeGenTarget::reverseBitsForLittleEndianEncoding() {
}
RecordKeeper &MutableRC = const_cast<RecordKeeper &>(Records);
- BitsInit *NewBI = BitsInit::get(MutableRC, NewBits);
+ const BitsInit *NewBI = BitsInit::get(MutableRC, NewBits);
// Update the bits in reversed order so that emitters will get the correct
// endianness.
diff --git a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
index 9dcc5f4..364b80c 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
@@ -57,7 +57,7 @@ bool PatternParser::parsePatternList(
// The match section consists of a list of matchers and predicates. Parse each
// one and add the equivalent GIMatchDag nodes, predicates, and edges.
for (unsigned I = 0; I < List.getNumArgs(); ++I) {
- Init *Arg = List.getArg(I);
+ const Init *Arg = List.getArg(I);
std::string Name = List.getArgName(I)
? List.getArgName(I)->getValue().str()
: ("__" + AnonPatNamePrefix + "_" + Twine(I)).str();
@@ -138,7 +138,7 @@ PatternParser::parseInstructionPattern(const Init &Arg, StringRef Name) {
return nullptr;
for (unsigned K = 0; K < DagPat->getNumArgs(); ++K) {
- Init *Arg = DagPat->getArg(K);
+ const Init *Arg = DagPat->getArg(K);
if (auto *DagArg = getDagWithSpecificOperator(*Arg, "MIFlags")) {
if (!parseInstructionPatternMIFlags(*Pat, DagArg))
return nullptr;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
index 52f7b0f..0b84a9b 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
@@ -382,7 +382,7 @@ bool CodeGenInstructionPattern::hasVariadicDefs() const {
if (I.variadicOpsAreDefs)
return true;
- DagInit *OutOps = I.TheDef->getValueAsDag("OutOperandList");
+ const DagInit *OutOps = I.TheDef->getValueAsDag("OutOperandList");
if (OutOps->arg_empty())
return false;
diff --git a/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp
index 9b454cf..0a835bd 100644
--- a/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp
@@ -100,7 +100,8 @@ public:
// Get the name of custom encoder or decoder, if there is any.
// Returns `{encoder name, decoder name}`.
-static std::pair<StringRef, StringRef> getCustomCoders(ArrayRef<Init *> Args) {
+static std::pair<StringRef, StringRef>
+getCustomCoders(ArrayRef<const Init *> Args) {
std::pair<StringRef, StringRef> Result;
for (const auto *Arg : Args) {
const auto *DI = dyn_cast<DagInit>(Arg);
@@ -187,8 +188,8 @@ void VarLenInst::buildRec(const DagInit *DI) {
PrintFatalError(TheDef->getLoc(),
"Expecting at least 3 arguments for `slice`");
HasDynamicSegment = true;
- Init *OperandName = DI->getArg(0), *HiBit = DI->getArg(1),
- *LoBit = DI->getArg(2);
+ const Init *OperandName = DI->getArg(0), *HiBit = DI->getArg(1),
+ *LoBit = DI->getArg(2);
if (!isa<StringInit>(OperandName) || !isa<IntInit>(HiBit) ||
!isa<IntInit>(LoBit))
PrintFatalError(TheDef->getLoc(), "Invalid argument types for `slice`");
@@ -211,7 +212,7 @@ void VarLenInst::buildRec(const DagInit *DI) {
if (NeedSwap) {
// Normalization: Hi bit should always be the second argument.
- Init *const NewArgs[] = {OperandName, LoBit, HiBit};
+ const Init *const NewArgs[] = {OperandName, LoBit, HiBit};
Segments.push_back({NumBits,
DagInit::get(DI->getOperator(), nullptr, NewArgs, {}),
CustomEncoder, CustomDecoder});
@@ -241,7 +242,7 @@ void VarLenCodeEmitterGen::run(raw_ostream &OS) {
for (const auto [Mode, EncodingDef] : EBM) {
Modes.insert({Mode, "_" + HWM.getMode(Mode).Name.str()});
const RecordVal *RV = EncodingDef->getValue("Inst");
- DagInit *DI = cast<DagInit>(RV->getValue());
+ const DagInit *DI = cast<DagInit>(RV->getValue());
VarLenInsts[R].insert({Mode, VarLenInst(DI, RV)});
}
continue;
diff --git a/llvm/utils/TableGen/CompressInstEmitter.cpp b/llvm/utils/TableGen/CompressInstEmitter.cpp
index 5ee02f4f..e087ff0 100644
--- a/llvm/utils/TableGen/CompressInstEmitter.cpp
+++ b/llvm/utils/TableGen/CompressInstEmitter.cpp
@@ -248,7 +248,8 @@ void CompressInstEmitter::addDagOperandMapping(const Record *Rec,
"' in the corresponding instruction operand!");
OperandMap[I].Kind = OpData::Operand;
- } else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(I - TiedCount))) {
+ } else if (const IntInit *II =
+ dyn_cast<IntInit>(Dag->getArg(I - TiedCount))) {
// Validate that corresponding instruction operand expects an immediate.
if (Inst.Operands[I].Rec->isSubClassOf("RegisterClass"))
PrintFatalError(
@@ -428,7 +429,7 @@ void CompressInstEmitter::createInstOperandMapping(
/// Instruction type and generate a warning.
void CompressInstEmitter::evaluateCompressPat(const Record *Rec) {
// Validate input Dag operands.
- DagInit *SourceDag = Rec->getValueAsDag("Input");
+ const DagInit *SourceDag = Rec->getValueAsDag("Input");
assert(SourceDag && "Missing 'Input' in compress pattern!");
LLVM_DEBUG(dbgs() << "Input: " << *SourceDag << "\n");
@@ -438,7 +439,7 @@ void CompressInstEmitter::evaluateCompressPat(const Record *Rec) {
verifyDagOpCount(SourceInst, SourceDag, true);
// Validate output Dag operands.
- DagInit *DestDag = Rec->getValueAsDag("Output");
+ const DagInit *DestDag = Rec->getValueAsDag("Output");
assert(DestDag && "Missing 'Output' in compress pattern!");
LLVM_DEBUG(dbgs() << "Output: " << *DestDag << "\n");
diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 31c46d5..09c1ee4 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -844,7 +844,7 @@ void MatcherGen::EmitResultInstructionAsOperand(
// children may themselves emit multiple MI operands.
unsigned NumSubOps = 1;
if (OperandNode->isSubClassOf("Operand")) {
- DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
+ const DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
if (unsigned NumArgs = MIOpInfo->getNumArgs())
NumSubOps = NumArgs;
}
diff --git a/llvm/utils/TableGen/DFAEmitter.cpp b/llvm/utils/TableGen/DFAEmitter.cpp
index 7d274a1..264cccf 100644
--- a/llvm/utils/TableGen/DFAEmitter.cpp
+++ b/llvm/utils/TableGen/DFAEmitter.cpp
@@ -306,7 +306,7 @@ StringRef Automaton::getActionSymbolType(StringRef A) {
}
Transition::Transition(const Record *R, Automaton *Parent) {
- BitsInit *NewStateInit = R->getValueAsBitsInit("NewState");
+ const BitsInit *NewStateInit = R->getValueAsBitsInit("NewState");
NewState = 0;
assert(NewStateInit->getNumBits() <= sizeof(uint64_t) * 8 &&
"State cannot be represented in 64 bits!");
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index 06bf7a0..0598bae 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -160,7 +160,7 @@ DXILOperationDesc::DXILOperationDesc(const Record *R) {
const RecordVal *RV = R->getValue("LLVMIntrinsic");
if (RV && RV->getValue()) {
- if (DefInit *DI = dyn_cast<DefInit>(RV->getValue())) {
+ if (const DefInit *DI = dyn_cast<DefInit>(RV->getValue())) {
auto *IntrinsicDef = DI->getDef();
auto DefName = IntrinsicDef->getName();
assert(DefName.starts_with("int_") && "invalid intrinsic name");
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index d4f4e3f..4d2320b 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -208,7 +208,7 @@ static int Value(bit_value_t V) {
}
static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
- if (BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
+ if (const BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
return bit->getValue() ? BIT_TRUE : BIT_FALSE;
// The bit is uninitialized.
@@ -234,14 +234,14 @@ static void dumpBits(raw_ostream &OS, const BitsInit &bits) {
}
}
-static BitsInit &getBitsField(const Record &def, StringRef str) {
+static const BitsInit &getBitsField(const Record &def, StringRef str) {
const RecordVal *RV = def.getValue(str);
- if (BitsInit *Bits = dyn_cast<BitsInit>(RV->getValue()))
+ if (const BitsInit *Bits = dyn_cast<BitsInit>(RV->getValue()))
return *Bits;
// variable length instruction
VarLenInst VLI = VarLenInst(cast<DagInit>(RV->getValue()), RV);
- SmallVector<Init *, 16> Bits;
+ SmallVector<const Init *, 16> Bits;
for (const auto &SI : VLI) {
if (const BitsInit *BI = dyn_cast<BitsInit>(SI.Value)) {
@@ -459,7 +459,7 @@ protected:
// Populates the insn given the uid.
void insnWithID(insn_t &Insn, unsigned Opcode) const {
const Record *EncodingDef = AllInstructions[Opcode].EncodingDef;
- BitsInit &Bits = getBitsField(*EncodingDef, "Inst");
+ const BitsInit &Bits = getBitsField(*EncodingDef, "Inst");
Insn.resize(std::max(BitWidth, Bits.getNumBits()), BIT_UNSET);
// We may have a SoftFail bitmask, which specifies a mask where an encoding
// may differ from the value in "Inst" and yet still be valid, but the
@@ -1290,7 +1290,7 @@ bool FilterChooser::emitPredicateMatchAux(const Init &Val, bool ParenIfBinOp,
}
bool FilterChooser::emitPredicateMatch(raw_ostream &OS, unsigned Opc) const {
- ListInit *Predicates =
+ const ListInit *Predicates =
AllInstructions[Opc].EncodingDef->getValueAsListInit("Predicates");
bool IsFirstEmission = true;
for (unsigned i = 0; i < Predicates->size(); ++i) {
@@ -1374,11 +1374,11 @@ void FilterChooser::emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const {
const Record *EncodingDef = AllInstructions[Opc].EncodingDef;
const RecordVal *RV = EncodingDef->getValue("SoftFail");
- BitsInit *SFBits = RV ? dyn_cast<BitsInit>(RV->getValue()) : nullptr;
+ const BitsInit *SFBits = RV ? dyn_cast<BitsInit>(RV->getValue()) : nullptr;
if (!SFBits)
return;
- BitsInit *InstBits = EncodingDef->getValueAsBitsInit("Inst");
+ const BitsInit *InstBits = EncodingDef->getValueAsBitsInit("Inst");
APInt PositiveMask(BitWidth, 0ULL);
APInt NegativeMask(BitWidth, 0ULL);
@@ -1886,7 +1886,7 @@ OperandInfo getOpInfo(const Record *TypeRecord) {
const RecordVal *HasCompleteDecoderVal =
TypeRecord->getValue("hasCompleteDecoder");
- BitInit *HasCompleteDecoderBit =
+ const BitInit *HasCompleteDecoderBit =
HasCompleteDecoderVal
? dyn_cast<BitInit>(HasCompleteDecoderVal->getValue())
: nullptr;
@@ -1976,10 +1976,10 @@ static void addOneOperandFields(const Record &EncodingDef, const BitsInit &Bits,
OpInfo.InitValue |= 1ULL << I;
for (unsigned I = 0, J = 0; I != Bits.getNumBits(); I = J) {
- VarInit *Var;
+ const VarInit *Var;
unsigned Offset = 0;
for (; J != Bits.getNumBits(); ++J) {
- VarBitInit *BJ = dyn_cast<VarBitInit>(Bits.getBit(J));
+ const VarBitInit *BJ = dyn_cast<VarBitInit>(Bits.getBit(J));
if (BJ) {
Var = dyn_cast<VarInit>(BJ->getBitVar());
if (I == J)
@@ -2010,7 +2010,7 @@ populateInstruction(const CodeGenTarget &Target, const Record &EncodingDef,
// We are bound to fail! For proper disassembly, the well-known encoding bits
// of the instruction must be fully specified.
- BitsInit &Bits = getBitsField(EncodingDef, "Inst");
+ const BitsInit &Bits = getBitsField(EncodingDef, "Inst");
if (Bits.allInComplete())
return 0;
@@ -2035,9 +2035,9 @@ populateInstruction(const CodeGenTarget &Target, const Record &EncodingDef,
// Gather the outputs/inputs of the instruction, so we can find their
// positions in the encoding. This assumes for now that they appear in the
// MCInst in the order that they're listed.
- std::vector<std::pair<Init *, StringRef>> InOutOperands;
- DagInit *Out = Def.getValueAsDag("OutOperandList");
- DagInit *In = Def.getValueAsDag("InOperandList");
+ std::vector<std::pair<const Init *, StringRef>> InOutOperands;
+ const DagInit *Out = Def.getValueAsDag("OutOperandList");
+ const DagInit *In = Def.getValueAsDag("InOperandList");
for (const auto &[Idx, Arg] : enumerate(Out->getArgs()))
InOutOperands.push_back(std::pair(Arg, Out->getArgNameStr(Idx)));
for (const auto &[Idx, Arg] : enumerate(In->getArgs()))
@@ -2069,7 +2069,7 @@ populateInstruction(const CodeGenTarget &Target, const Record &EncodingDef,
} else {
// For each operand, see if we can figure out where it is encoded.
for (const auto &Op : InOutOperands) {
- Init *OpInit = Op.first;
+ const Init *OpInit = Op.first;
StringRef OpName = Op.second;
// We're ready to find the instruction encoding locations for this
@@ -2077,7 +2077,7 @@ populateInstruction(const CodeGenTarget &Target, const Record &EncodingDef,
// First, find the operand type ("OpInit"), and sub-op names
// ("SubArgDag") if present.
- DagInit *SubArgDag = dyn_cast<DagInit>(OpInit);
+ const DagInit *SubArgDag = dyn_cast<DagInit>(OpInit);
if (SubArgDag)
OpInit = SubArgDag->getOperator();
const Record *OpTypeRec = cast<DefInit>(OpInit)->getDef();
@@ -2521,7 +2521,7 @@ namespace llvm {
for (const auto &NumberedInstruction : NumberedInstructions) {
const Record *InstDef = NumberedInstruction->TheDef;
if (const RecordVal *RV = InstDef->getValue("EncodingInfos")) {
- if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ if (const DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
EncodingInfoByHwMode EBM(DI->getDef(), HWM);
for (auto &[ModeId, Encoding] : EBM) {
// DecoderTables with DefaultMode should not have any suffix.
diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
index 2524a44..424f1cc 100644
--- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
@@ -1375,7 +1375,7 @@ bool CombineRuleBuilder::addFeaturePredicates(RuleMatcher &M) {
if (!RuleDef.getValue("Predicates"))
return true;
- ListInit *Preds = RuleDef.getValueAsListInit("Predicates");
+ const ListInit *Preds = RuleDef.getValueAsListInit("Predicates");
for (const Init *PI : Preds->getValues()) {
const DefInit *Pred = dyn_cast<DefInit>(PI);
if (!Pred)
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index c53f705..e866bd9 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -579,8 +579,8 @@ Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(
if (const ListInit *AddrSpaces = Predicate.getAddressSpaces()) {
SmallVector<unsigned, 4> ParsedAddrSpaces;
- for (Init *Val : AddrSpaces->getValues()) {
- IntInit *IntVal = dyn_cast<IntInit>(Val);
+ for (const Init *Val : AddrSpaces->getValues()) {
+ const IntInit *IntVal = dyn_cast<IntInit>(Val);
if (!IntVal)
return failedImport("Address space is not an integer");
ParsedAddrSpaces.push_back(IntVal->getValue());
@@ -2023,7 +2023,10 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
auto &DstI = Target.getInstruction(DstOp);
StringRef DstIName = DstI.TheDef->getName();
- unsigned DstNumDefs = DstI.Operands.NumDefs,
+ // Count both implicit and explicit defs in the dst instruction.
+ // This avoids errors importing patterns that have inherent implicit defs.
+ unsigned DstExpDefs = DstI.Operands.NumDefs,
+ DstNumDefs = DstI.ImplicitDefs.size() + DstExpDefs,
SrcNumDefs = Src.getExtTypes().size();
if (DstNumDefs < SrcNumDefs) {
if (DstNumDefs != 0)
@@ -2045,7 +2048,7 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
// The root of the match also has constraints on the register bank so that it
// matches the result instruction.
unsigned OpIdx = 0;
- unsigned N = std::min(DstNumDefs, SrcNumDefs);
+ unsigned N = std::min(DstExpDefs, SrcNumDefs);
for (unsigned I = 0; I < N; ++I) {
const TypeSetByHwMode &VTy = Src.getExtType(I);
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index a7039ff..8c0e272 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -411,7 +411,7 @@ void InstrInfoEmitter::emitOperandTypeMappings(
OperandRecords.push_back(Op.Rec);
++CurrentOffset;
} else {
- for (Init *Arg : MIOI->getArgs()) {
+ for (const Init *Arg : MIOI->getArgs()) {
OperandRecords.push_back(cast<DefInit>(Arg)->getDef());
++CurrentOffset;
}
@@ -1296,7 +1296,7 @@ void InstrInfoEmitter::emitRecord(
OS << "|(1ULL<<MCID::Authenticated)";
// Emit all of the target-specific flags...
- BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
+ const BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
if (!TSF)
PrintFatalError(Inst.TheDef->getLoc(), "no TSFlags?");
uint64_t Value = 0;
diff --git a/llvm/utils/TableGen/OptionParserEmitter.cpp b/llvm/utils/TableGen/OptionParserEmitter.cpp
index 424cf16..2872762 100644
--- a/llvm/utils/TableGen/OptionParserEmitter.cpp
+++ b/llvm/utils/TableGen/OptionParserEmitter.cpp
@@ -433,10 +433,10 @@ static void EmitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
OS << ", ";
int NumFlags = 0;
const ListInit *LI = R.getValueAsListInit("Flags");
- for (Init *I : *LI)
+ for (const Init *I : *LI)
OS << (NumFlags++ ? " | " : "") << cast<DefInit>(I)->getDef()->getName();
if (GroupFlags) {
- for (Init *I : *GroupFlags)
+ for (const Init *I : *GroupFlags)
OS << (NumFlags++ ? " | " : "")
<< cast<DefInit>(I)->getDef()->getName();
}
@@ -447,11 +447,11 @@ static void EmitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
OS << ", ";
int NumVisFlags = 0;
LI = R.getValueAsListInit("Visibility");
- for (Init *I : *LI)
+ for (const Init *I : *LI)
OS << (NumVisFlags++ ? " | " : "")
<< cast<DefInit>(I)->getDef()->getName();
if (GroupVis) {
- for (Init *I : *GroupVis)
+ for (const Init *I : *GroupVis)
OS << (NumVisFlags++ ? " | " : "")
<< cast<DefInit>(I)->getDef()->getName();
}
@@ -473,7 +473,7 @@ static void EmitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
HelpTextsForVariants;
for (const Record *VisibilityHelp :
R.getValueAsListOfDefs("HelpTextsForVariants")) {
- ArrayRef<Init *> Visibilities =
+ ArrayRef<const Init *> Visibilities =
VisibilityHelp->getValueAsListInit("Visibilities")->getValues();
std::vector<std::string> VisibilityNames;
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 371ee75..be2a2b3 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -445,7 +445,7 @@ void RegisterInfoEmitter::EmitRegMappingTables(
if (!V || !V->getValue())
continue;
- DefInit *DI = cast<DefInit>(V->getValue());
+ const DefInit *DI = cast<DefInit>(V->getValue());
const Record *Alias = DI->getDef();
const auto &AliasIter = llvm::lower_bound(
DwarfRegNums, Alias, [](const DwarfRegNumsMapPair &A, const Record *B) {
@@ -1061,10 +1061,10 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS) {
OS << " 0,\n";
for (const auto &RE : Regs) {
const Record *Reg = RE.TheDef;
- BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
+ const BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
uint64_t Value = 0;
for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
- if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
+ if (const BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
Value |= (uint64_t)B->getValue() << b;
}
OS << " " << Value << ",\n";
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp
index d6cb94c..4bf4df6 100644
--- a/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -196,7 +196,7 @@ private:
bool IsPrimary, raw_ostream &OS);
void emitIfdef(StringRef Guard, raw_ostream &OS);
- bool parseFieldType(GenericField &Field, Init *II);
+ bool parseFieldType(GenericField &Field, const Init *II);
std::unique_ptr<SearchIndex>
parseSearchIndex(GenericTable &Table, const RecordVal *RecVal, StringRef Name,
ArrayRef<StringRef> Key, bool EarlyOut, bool ReturnRange);
@@ -233,8 +233,8 @@ int64_t SearchableTableEmitter::getNumericKey(const SearchIndex &Index,
bool SearchableTableEmitter::compareBy(const Record *LHS, const Record *RHS,
const SearchIndex &Index) {
for (const auto &Field : Index.Fields) {
- Init *LHSI = LHS->getValueInit(Field.Name);
- Init *RHSI = RHS->getValueInit(Field.Name);
+ const Init *LHSI = LHS->getValueInit(Field.Name);
+ const Init *RHSI = RHS->getValueInit(Field.Name);
if (isa<BitsRecTy>(Field.RecType) || isa<IntRecTy>(Field.RecType)) {
int64_t LHSi = getAsInt(LHSI);
@@ -574,7 +574,8 @@ void SearchableTableEmitter::emitGenericTable(const GenericTable &Table,
OS << "#endif\n\n";
}
-bool SearchableTableEmitter::parseFieldType(GenericField &Field, Init *TypeOf) {
+bool SearchableTableEmitter::parseFieldType(GenericField &Field,
+ const Init *TypeOf) {
auto Type = dyn_cast<StringInit>(TypeOf);
if (!Type)
return false;
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 8ab7bdc..bcc5712 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -251,7 +251,7 @@ static uint8_t byteFromBitsInit(const BitsInit *B) {
uint8_t Value = 0;
for (unsigned I = 0; I != N; ++I) {
- BitInit *Bit = cast<BitInit>(B->getBit(I));
+ const BitInit *Bit = cast<BitInit>(B->getBit(I));
Value |= Bit->getValue() << I;
}
return Value;
@@ -487,7 +487,7 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
uint8_t Enc = byteFromBitsInit(RegRec->getValueAsBitsInit("OpEncBits"));
if (isExplicitAlign(RegInst)) {
// The instruction require explicitly aligned memory.
- BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
+ const BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
Result.Alignment = Align(byteFromBitsInit(VectSize));
} else if (!Enc && !isExplicitUnalign(RegInst) &&
getMemOperandSize(MemOpRec) > 64) {
@@ -512,7 +512,7 @@ void X86FoldTablesEmitter::addBroadcastEntry(
assert(Table.find(RegInst) == Table.end() && "Override entry unexpectedly");
X86FoldTableEntry Result = X86FoldTableEntry(RegInst, MemInst);
- DagInit *In = MemInst->TheDef->getValueAsDag("InOperandList");
+ const DagInit *In = MemInst->TheDef->getValueAsDag("InOperandList");
for (unsigned I = 0, E = In->getNumArgs(); I != E; ++I) {
Result.BroadcastKind =
StringSwitch<X86FoldTableEntry::BcastType>(In->getArg(I)->getAsString())
diff --git a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
index 47df5bf..10fab46 100644
--- a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
+++ b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
@@ -112,7 +112,7 @@ static uint8_t byteFromBitsInit(const BitsInit *B) {
uint8_t Value = 0;
for (unsigned I = 0; I != N; ++I) {
- BitInit *Bit = cast<BitInit>(B->getBit(I));
+ const BitInit *Bit = cast<BitInit>(B->getBit(I));
Value |= Bit->getValue() << I;
}
return Value;
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 60fc1d1..26b8816 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -77,17 +77,15 @@ unsigned X86Disassembler::getMemOperandSize(const Record *MemRec) {
/// @param init - A reference to the BitsInit to be decoded.
/// @return - The field, with the first bit in the BitsInit as the lowest
/// order bit.
-static uint8_t byteFromBitsInit(BitsInit &init) {
+static uint8_t byteFromBitsInit(const BitsInit &init) {
int width = init.getNumBits();
assert(width <= 8 && "Field is too large for uint8_t!");
- int index;
uint8_t mask = 0x01;
-
uint8_t ret = 0;
- for (index = 0; index < width; index++) {
+ for (int index = 0; index < width; index++) {
if (cast<BitInit>(init.getBit(index))->getValue())
ret |= mask;
@@ -104,7 +102,7 @@ static uint8_t byteFromBitsInit(BitsInit &init) {
/// @param name - The name of the field in the record.
/// @return - The field, as translated by byteFromBitsInit().
static uint8_t byteFromRec(const Record *rec, StringRef name) {
- BitsInit *bits = rec->getValueAsBitsInit(name);
+ const BitsInit *bits = rec->getValueAsBitsInit(name);
return byteFromBitsInit(*bits);
}
diff --git a/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
index 84d569d..1b193af 100644
--- a/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Basic/BUILD.gn
@@ -96,6 +96,7 @@ static_library("Basic") {
"SourceManager.cpp",
"SourceMgrAdapter.cpp",
"Stack.cpp",
+ "StackExhaustionHandler.cpp",
"TargetID.cpp",
"TargetInfo.cpp",
"Targets.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/SandboxIR/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/SandboxIR/BUILD.gn
index 578b2b1..5f2bf7e 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/SandboxIR/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/SandboxIR/BUILD.gn
@@ -9,6 +9,7 @@ unittest("SandboxIRTests") {
]
sources = [
"IntrinsicInstTest.cpp",
+ "OperatorTest.cpp",
"PassTest.cpp",
"RegionTest.cpp",
"SandboxIRTest.cpp",
diff --git a/llvm/utils/lit/tests/shtest-output-printing.py b/llvm/utils/lit/tests/shtest-output-printing.py
index 129cff9..b9045c3 100644
--- a/llvm/utils/lit/tests/shtest-output-printing.py
+++ b/llvm/utils/lit/tests/shtest-output-printing.py
@@ -25,7 +25,7 @@
# CHECK-NEXT: not not wc missing-file &> [[FILE:.*]] || true
# CHECK-NEXT: # executed command: not not wc missing-file
# CHECK-NEXT: # .---redirected output from '[[FILE]]'
-# CHECK-NEXT: # | wc: {{cannot open missing-file|missing-file.* No such file or directory}}
+# CHECK-NEXT: # | {{.*}}wc: {{cannot open missing-file|missing-file.* No such file or directory}}
# CHECK-NEXT: # `-----------------------------
# CHECK-NEXT: # note: command had no output on stdout or stderr
# CHECK-NEXT: # error: command failed with exit status: 1
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index a150e2b..984af50 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -678,7 +678,7 @@ lowerReductionWithStartValue(ConversionPatternRewriter &rewriter, Location loc,
vectorOperand, fmf);
}
-/// Overloaded methods to lower a *predicated* reduction to an llvm instrinsic
+/// Overloaded methods to lower a *predicated* reduction to an llvm intrinsic
/// that requires a start value. This start value format spans across fp
/// reductions without mask and all the masked reduction intrinsics.
template <class LLVMVPRedIntrinOp, class ReductionNeutral>
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 635273b..e1b97fb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -250,6 +250,32 @@ private:
LinalgOp linalgOp,
std::optional<AffineMap> maybeMaskingMap);
+ /// Check whether this permutation map can be used for masking. At the
+ /// moment we only make sure that there are no broadcast dimensions, but this
+ /// might change if indexing maps evolve.
+ bool isValidMaskingMap(AffineMap maskingMap) {
+ return maskingMap.getBroadcastDims().size() == 0;
+ }
+
+ /// Turn the input indexing map into a valid masking map.
+ ///
+ /// The input indexing map may contain "zero" results, e.g.:
+ /// (d0, d1, d2, d3) -> (d2, d1, d0, 0)
+ /// Applying such maps to canonical vector shapes like this one:
+ /// (1, 16, 16, 4)
+ /// would yield an invalid vector shape like this:
+ /// (16, 16, 1, 0)
+ /// Instead, drop the broadcasting dims that make no sense for masking perm.
+ /// maps:
+ /// (d0, d1, d2, d3) -> (d2, d1, d0)
+ /// This way, the corresponding vector/mask type will be:
+ /// vector<16x16x1xty>
+ /// rather than this invalid Vector type:
+ /// vector<16x16x1x0xty>
+ AffineMap getMaskingMapFromIndexingMap(AffineMap &indexingMap) {
+ return indexingMap.dropZeroResults();
+ }
+
// Holds the compile-time static sizes of the iteration space to vectorize.
// Dynamic dimensions are represented using ShapedType::kDynamic.
SmallVector<int64_t> iterSpaceStaticSizes;
@@ -360,6 +386,10 @@ VectorizationState::initState(RewriterBase &rewriter, LinalgOp linalgOp,
Value VectorizationState::getOrCreateMaskFor(
RewriterBase &rewriter, Operation *opToMask, LinalgOp linalgOp,
std::optional<AffineMap> maybeMaskingMap) {
+
+ assert((!maybeMaskingMap || isValidMaskingMap(*maybeMaskingMap)) &&
+ "Ill-formed masking map.");
+
// No mask is needed if the operation is not maskable.
auto maskableOp = dyn_cast<vector::MaskableOpInterface>(opToMask);
if (!maskableOp)
@@ -429,20 +459,8 @@ VectorizationState::maskOperation(RewriterBase &rewriter, Operation *opToMask,
LDBG("Trying to mask: " << *opToMask << "\n");
std::optional<AffineMap> maybeMaskingMap = std::nullopt;
- // The Operand indexing map may contain "zero" results, e.g.:
- // (d0, d1, d2, d3) -> (d0, d1, d2, 0)
- // When applied to canonical vector shapes like these:
- // (1, 16, 16, 4)
- // we would get:
- // (1, 16, 16, 0)
- // Instead, we should extract the following map permutation map for masking:
- // (d0, d1, d2, d3) -> (d0, d1, d2)
- // This way, the corresponding vector/mask type will be:
- // vector<1x16x16xty>
- // rather than:
- // vector<1x16x16x0xty>
if (maybeIndexingMap)
- maybeMaskingMap = maybeIndexingMap->dropZeroResults();
+ maybeMaskingMap = getMaskingMapFromIndexingMap(*maybeIndexingMap);
// Create or retrieve mask for this operation.
Value mask =
@@ -845,9 +863,10 @@ static uint64_t getTrailingNonUnitLoopDimIdx(LinalgOp linalgOp) {
llvm::count_if(loopRanges, [](int64_t dim) { return dim != 1; }) == 1) &&
"For statically shaped Linalg Ops, only one "
"non-unit loop dim is expected");
+ assert(loopRanges.size() != 0 && "Empty loops, nothing to analyse.");
size_t idx = loopRanges.size() - 1;
- for (; idx >= 0; idx--)
+ for (; idx != 0; idx--)
if (loopRanges[idx] != 1)
break;
diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
index d2ab4ca..70b2aaf 100644
--- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
+++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -47,7 +47,7 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
break;
int64_t currTargetShape = targetShape[targetDim];
- while (sourceDim < sourceShape.size() &&
+ while (sourceDim < (sourceShape.size() - 1) &&
sourceShape[sourceDim] != ShapedType::kDynamic &&
prodOfCollapsedDims * sourceShape[sourceDim] < currTargetShape) {
prodOfCollapsedDims *= sourceShape[sourceDim];
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index 6e97b2a..1f63519 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -2180,7 +2180,7 @@ ModuleImport::processDebugIntrinsic(llvm::DbgVariableIntrinsic *dbgIntr,
return emitError(loc) << "failed to convert a debug intrinsic operand: "
<< diag(*dbgIntr);
- // Ensure that the debug instrinsic is inserted right after its operand is
+ // Ensure that the debug intrinsic is inserted right after its operand is
// defined. Otherwise, the operand might not necessarily dominate the
// intrinsic. If the defining operation is a terminator, insert the intrinsic
// into a dominated block.
diff --git a/mlir/lib/Transforms/RemoveDeadValues.cpp b/mlir/lib/Transforms/RemoveDeadValues.cpp
index 3de4fb7..7e45f18 100644
--- a/mlir/lib/Transforms/RemoveDeadValues.cpp
+++ b/mlir/lib/Transforms/RemoveDeadValues.cpp
@@ -589,7 +589,7 @@ void RemoveDeadValues::runOnOperation() {
});
if (acceptableIR.wasInterrupted())
- return;
+ return signalPassFailure();
module->walk([&](Operation *op) {
if (auto funcOp = dyn_cast<FunctionOpInterface>(op)) {
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index 2c56b71..3560ab2 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -38,6 +38,33 @@ module attributes {transform.with_named_sequence} {
// -----
+#map = affine_map<() -> ()>
+func.func @negative_no_loops(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
+ %1 = linalg.generic {
+ indexing_maps = [#map],
+ iterator_types = []
+ } outs(%arg1 : tensor<f32>) {
+ ^bb0(%arg4: f32):
+ %2 = tensor.extract %arg0[] : tensor<f32>
+ linalg.yield %2 : f32
+ } -> tensor<f32>
+ return %1 : tensor<f32>
+}
+// CHECK-LABEL: func.func @negative_no_loops
+// CHECK: tensor.extract
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+
+// -----
+
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func.func @vectorize_nd_tensor_extract_constant_idx(%arg0: tensor<3x3xf32>, %arg2: tensor<1x1x3xf32>) -> tensor<1x1x3xf32> {
%c0 = arith.constant 1 : index
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index 0aa2d33..dbf0f0b 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -1251,6 +1251,29 @@ func.func @no_fold_expand_of_collapse_dynamic(%arg0 : tensor<?x?x?xf32>, %arg1:
// -----
+func.func @compose_expand_of_collapse_last_two_dims(%arg0: tensor<?x64x1xf32>) -> tensor<?x384xf32> {
+ %collapsed = tensor.collapse_shape %arg0 [[0, 1, 2]] : tensor<?x64x1xf32> into tensor<?xf32>
+ %c0 = arith.constant 0 : index
+ %dim = tensor.dim %collapsed, %c0 : tensor<?xf32>
+ %c384= arith.constant 384 : index
+ %div = arith.divui %dim, %c384 : index
+ %expanded = tensor.expand_shape %collapsed [[0, 1]] output_shape [%div, 384] : tensor<?xf32> into tensor<?x384xf32>
+ return %expanded : tensor<?x384xf32>
+}
+// CHECK: #[[$MAP:.*]] = affine_map<()[s0] -> (s0 * 64)>
+// CHECK-LABEL: @compose_expand_of_collapse_last_two_dims
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x64x1xf32>
+// CHECK: %[[CONSTANT0:.+]] = arith.constant 0 : index
+// CHECK: %[[CONSTANT384:.+]] = arith.constant 384 : index
+// CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2]] : tensor<?x64x1xf32> into tensor<?xf32>
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[CONSTANT0]] : tensor<?x64x1xf32>
+// CHECK: %[[AFFAPPLY:.+]] = affine.apply #[[$MAP]]()[%[[DIM]]]
+// CHECK: %[[DIVUI:.+]] = arith.divui %[[AFFAPPLY]], %[[CONSTANT384]] : index
+// CHECK: %[[RESULT:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1]] output_shape [%[[DIVUI]], 384] : tensor<?xf32> into tensor<?x384xf32>
+// CHECK: return %[[RESULT]]
+
+// -----
+
func.func @compose_expand_of_collapse(%arg0 : tensor<2x3x4x5x6x7x8xf32>)
-> tensor<24x5x42x8xf32> {
%0 = tensor.collapse_shape %arg0 [[0, 1, 2, 3, 4, 5, 6]]
diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir
index 84e6c59..921d7f9 100644
--- a/mlir/test/Dialect/Tensor/invalid.mlir
+++ b/mlir/test/Dialect/Tensor/invalid.mlir
@@ -200,7 +200,6 @@ func.func @tensor.reshape_num_elements_mismatch(
func.func @extract_slice_wrong_result_rank(%t: tensor<?xf32>, %idx : index) {
// expected-error @+1 {{expected rank to be smaller or equal to the other rank.}}
%0 = tensor.extract_slice %t[0][4][1] : tensor<?xf32> to tensor<?x?xf32>
-
return
}
@@ -209,7 +208,25 @@ func.func @extract_slice_wrong_result_rank(%t: tensor<?xf32>, %idx : index) {
func.func @extract_slice_wrong_result_rank(%t: tensor<?xf32>, %idx : index) {
// expected-error @+1 {{expected element type to be 'f32'}}
%0 = tensor.extract_slice %t[0][4][1] : tensor<?xf32> to tensor<4xi8>
+ return
+}
+
+
+// -----
+
+func.func @extract_slice_size_and_output_dim_mismatch_static_size(%t: tensor<16xf32>) {
+ // expected-error @+1 {{expected type to be 'tensor<4xf32>' or a rank-reduced version. (size mismatch)}}
+ %0 = tensor.extract_slice %t[0][4][1]
+ : tensor<16xf32> to tensor<6xf32>
+ return
+}
+
+// -----
+func.func @extract_slice_size_and_output_dim_mismatch_dynamic_size(%t: tensor<?xf32>, %idx : index) {
+ // expected-error @+2 {{expected type to be 'tensor<?xf32>' or a rank-reduced version. (size mismatch)}}
+ %c4 = arith.constant 4 : index
+ %0 = tensor.extract_slice %t[0][%c4][1] : tensor<?xf32> to tensor<4xi8>
return
}
@@ -219,7 +236,6 @@ func.func @extract_slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index)
// expected-error @+1 {{expected type to be 'tensor<?x4x4xf32>' or a rank-reduced version. (size mismatch)}}
%0 = tensor.extract_slice %t[0, 0, 0][%idx, 4, 4][1, 1, 1]
: tensor<8x16x4xf32> to tensor<4x4x4xf32>
-
return
}
@@ -229,7 +245,14 @@ func.func @extract_slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index
// expected-error @+1 {{expected type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (size mismatch)}}
%0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1]
: tensor<8x16x4xf32> to tensor<?x4x4xf32>
+ return
+}
+// -----
+
+func.func @illegal_num_offsets(%arg0 : tensor<?x?x?xf32>, %arg1 : index, %arg2 : index) {
+ // expected-error@+1 {{expected 3 offset values}}
+ %0 = tensor.extract_slice %arg0[0, 0] [%arg1, %arg2] [1, 1] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
return
}
@@ -349,14 +372,6 @@ func.func @rank(%0: f32) {
// -----
-func.func @illegal_num_offsets(%arg0 : tensor<?x?x?xf32>, %arg1 : index, %arg2 : index) {
- // expected-error@+1 {{expected 3 offset values}}
- %0 = tensor.extract_slice %arg0[0, 0] [%arg1, %arg2] [1, 1] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
- return
-}
-
-// -----
-
func.func @illegal_num_offsets(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?x?xf32>,
%arg2 : index, %arg3 : index) {
// expected-error@+1 {{expected 3 offset values}}
diff --git a/mlir/test/mlir-rewrite/simple.mlir b/mlir/test/mlir-rewrite/simple.mlir
index ab6bfe2..66f17f0 100644
--- a/mlir/test/mlir-rewrite/simple.mlir
+++ b/mlir/test/mlir-rewrite/simple.mlir
@@ -4,8 +4,7 @@
func.func @two_dynamic_one_direct_shape(%arg0: tensor<?x4x?xf32>, %arg1: tensor<2x4x?xf32>) -> tensor<?x4x?xf32> {
// RENAME: "test.concat"({{.*}}) {bxis = 0 : i64}
- // RANGE: 《%{{.*}} = 〖"test.concat"〗({{.*}}) {axis = 0 : i64} : (tensor<?x4x?xf32>, tensor<2x4x?xf32>) -> tensor<?x4x?xf32>》
+ // RANGE: <%{{.*}} = ["test.concat"]({{.*}}) {axis = 0 : i64} : (tensor<?x4x?xf32>, tensor<2x4x?xf32>) -> tensor<?x4x?xf32>>
%5 = "test.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<?x4x?xf32>, tensor<2x4x?xf32>) -> tensor<?x4x?xf32>
return %5 : tensor<?x4x?xf32>
}
-
diff --git a/mlir/tools/mlir-rewrite/mlir-rewrite.cpp b/mlir/tools/mlir-rewrite/mlir-rewrite.cpp
index 308e649..e70aa5d 100644
--- a/mlir/tools/mlir-rewrite/mlir-rewrite.cpp
+++ b/mlir/tools/mlir-rewrite/mlir-rewrite.cpp
@@ -320,25 +320,25 @@ LogicalResult markRanges(RewritePad &rewriteState, raw_ostream &os) {
for (auto it : rewriteState.getOpDefs()) {
auto [startOp, endOp] = getOpRange(it);
- rewriteState.insertText(startOp, "《");
- rewriteState.insertText(endOp, "》");
+ rewriteState.insertText(startOp, "<");
+ rewriteState.insertText(endOp, ">");
auto nameRange = getOpNameRange(it);
if (isGeneric(it)) {
- rewriteState.insertText(nameRange.Start, "〖");
- rewriteState.insertText(nameRange.End, "〗");
+ rewriteState.insertText(nameRange.Start, "[");
+ rewriteState.insertText(nameRange.End, "]");
} else {
- rewriteState.insertText(nameRange.Start, "〔");
- rewriteState.insertText(nameRange.End, "〕");
+ rewriteState.insertText(nameRange.Start, "![");
+ rewriteState.insertText(nameRange.End, "]!");
}
}
// Highlight all comment lines.
// TODO: Could be replaced if this is kept in memory.
for (auto commentLine : rewriteState.getSingleLineComments()) {
- rewriteState.insertText(commentLine.Start, "❰");
- rewriteState.insertText(commentLine.End, "❱");
+ rewriteState.insertText(commentLine.Start, "{");
+ rewriteState.insertText(commentLine.End, "}");
}
return success();
diff --git a/mlir/tools/mlir-tblgen/BytecodeDialectGen.cpp b/mlir/tools/mlir-tblgen/BytecodeDialectGen.cpp
index 6a3d5a2..d7967c7 100644
--- a/mlir/tools/mlir-tblgen/BytecodeDialectGen.cpp
+++ b/mlir/tools/mlir-tblgen/BytecodeDialectGen.cpp
@@ -258,8 +258,7 @@ void Generator::emitParseHelper(StringRef kind, StringRef returnType,
SmallVector<std::string> argNames;
if (def->isSubClassOf("CompositeBytecode")) {
const DagInit *members = def->getValueAsDag("members");
- args = llvm::to_vector(map_range(
- members->getArgs(), [](Init *init) { return (const Init *)init; }));
+ args = llvm::to_vector(members->getArgs());
argNames = llvm::to_vector(
map_range(members->getArgNames(), [](const StringInit *init) {
return init->getAsUnquotedString();
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 8f3bbe6..1abc0cc 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -104,6 +104,17 @@ libc_support_library(
)
libc_support_library(
+ name = "llvm_libc_types_cfloat128",
+ hdrs = ["include/llvm-libc-types/cfloat128.h"],
+ deps = [":llvm_libc_macros_float_macros"],
+)
+
+libc_support_library(
+ name = "llvm_libc_types_cfloat16",
+ hdrs = ["include/llvm-libc-types/cfloat16.h"],
+)
+
+libc_support_library(
name = "llvm_libc_macros_fcntl_macros",
hdrs = ["include/llvm-libc-macros/linux/fcntl-macros.h"],
)
@@ -269,6 +280,16 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_macros_properties_complex_types",
+ hdrs = ["src/__support/macros/properties/complex_types.h"],
+ deps = [
+ ":__support_macros_properties_types",
+ ":llvm_libc_types_cfloat128",
+ ":llvm_libc_types_cfloat16",
+ ],
+)
+
+libc_support_library(
name = "__support_macros_properties_types",
hdrs = ["src/__support/macros/properties/types.h"],
deps = [
@@ -493,6 +514,7 @@ libc_support_library(
deps = [
":__support_macros_attributes",
":__support_macros_config",
+ ":__support_macros_properties_complex_types",
":__support_macros_properties_types",
":llvm_libc_macros_stdfix_macros",
],